diff --git a/vendor/github.com/docker/docker/api/types/BUILD.bazel b/vendor/github.com/docker/docker/api/types/BUILD.bazel
deleted file mode 100644
index e9b2ec2190a21..0000000000000
--- a/vendor/github.com/docker/docker/api/types/BUILD.bazel
+++ /dev/null
@@ -1,40 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "go_default_library",
-    srcs = [
-        "auth.go",
-        "client.go",
-        "configs.go",
-        "error_response.go",
-        "graph_driver_data.go",
-        "id_response.go",
-        "image_delete_response_item.go",
-        "image_summary.go",
-        "plugin.go",
-        "plugin_device.go",
-        "plugin_env.go",
-        "plugin_interface_type.go",
-        "plugin_mount.go",
-        "plugin_responses.go",
-        "port.go",
-        "seccomp.go",
-        "service_update_response.go",
-        "stats.go",
-        "types.go",
-        "volume.go",
-    ],
-    importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types",
-    importpath = "github.com/docker/docker/api/types",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//vendor/github.com/docker/docker/api/types/container:go_default_library",
-        "//vendor/github.com/docker/docker/api/types/filters:go_default_library",
-        "//vendor/github.com/docker/docker/api/types/mount:go_default_library",
-        "//vendor/github.com/docker/docker/api/types/network:go_default_library",
-        "//vendor/github.com/docker/docker/api/types/registry:go_default_library",
-        "//vendor/github.com/docker/docker/api/types/swarm:go_default_library",
-        "//vendor/github.com/docker/go-connections/nat:go_default_library",
-        "//vendor/github.com/docker/go-units:go_default_library",
-    ],
-)
diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go
deleted file mode 100644
index ddf15bb182dd7..0000000000000
--- a/vendor/github.com/docker/docker/api/types/auth.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-// AuthConfig contains authorization information for connecting to a Registry
-type AuthConfig struct {
-	Username string `json:"username,omitempty"`
-	Password string `json:"password,omitempty"`
-	Auth     string `json:"auth,omitempty"`
-
-	// Email is an optional value associated with the username.
-	// This field is deprecated and will be removed in a later
-	// version of docker.
-	Email string `json:"email,omitempty"`
-
-	ServerAddress string `json:"serveraddress,omitempty"`
-
-	// IdentityToken is used to authenticate the user and get
-	// an access token for the registry.
-	IdentityToken string `json:"identitytoken,omitempty"`
-
-	// RegistryToken is a bearer token to be sent to a registry
-	RegistryToken string `json:"registrytoken,omitempty"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/BUILD.bazel b/vendor/github.com/docker/docker/api/types/blkiodev/BUILD.bazel
deleted file mode 100644
index 1114b8e1da42c..0000000000000
--- a/vendor/github.com/docker/docker/api/types/blkiodev/BUILD.bazel
+++ /dev/null
@@ -1,9 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "go_default_library",
-    srcs = ["blkio.go"],
-    importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/blkiodev",
-    importpath = "github.com/docker/docker/api/types/blkiodev",
-    visibility = ["//visibility:public"],
-)
diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
deleted file mode 100644
index bf3463b90e711..0000000000000
--- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package blkiodev // import "github.com/docker/docker/api/types/blkiodev"
-
-import "fmt"
-
-// WeightDevice is a structure that holds device:weight pair
-type WeightDevice struct {
-	Path   string
-	Weight uint16
-}
-
-func (w *WeightDevice) String() string {
-	return fmt.Sprintf("%s:%d", w.Path, w.Weight)
-}
-
-// ThrottleDevice is a structure that holds device:rate_per_second pair
-type ThrottleDevice struct {
-	Path string
-	Rate uint64
-}
-
-func (t *ThrottleDevice) String() string {
-	return fmt.Sprintf("%s:%d", t.Path, t.Rate)
-}
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
deleted file mode 100644
index 3d2e057c9aac1..0000000000000
--- a/vendor/github.com/docker/docker/api/types/client.go
+++ /dev/null
@@ -1,390 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-import (
-	"bufio"
-	"io"
-	"net"
-
-	"github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/go-units"
-)
-
-// CheckpointCreateOptions holds parameters to create a checkpoint from a container
-type CheckpointCreateOptions struct {
-	CheckpointID  string
-	CheckpointDir string
-	Exit          bool
-}
-
-// CheckpointListOptions holds parameters to list checkpoints for a container
-type CheckpointListOptions struct {
-	CheckpointDir string
-}
-
-// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
-type CheckpointDeleteOptions struct {
-	CheckpointID  string
-	CheckpointDir string
-}
-
-// ContainerAttachOptions holds parameters to attach to a container.
-type ContainerAttachOptions struct {
-	Stream     bool
-	Stdin      bool
-	Stdout     bool
-	Stderr     bool
-	DetachKeys string
-	Logs       bool
-}
-
-// ContainerCommitOptions holds parameters to commit changes into a container.
-type ContainerCommitOptions struct {
-	Reference string
-	Comment   string
-	Author    string
-	Changes   []string
-	Pause     bool
-	Config    *container.Config
-}
-
-// ContainerExecInspect holds information returned by exec inspect.
-type ContainerExecInspect struct {
-	ExecID      string
-	ContainerID string
-	Running     bool
-	ExitCode    int
-	Pid         int
-}
-
-// ContainerListOptions holds parameters to list containers with.
-type ContainerListOptions struct {
-	Quiet   bool
-	Size    bool
-	All     bool
-	Latest  bool
-	Since   string
-	Before  string
-	Limit   int
-	Filters filters.Args
-}
-
-// ContainerLogsOptions holds parameters to filter logs with.
-type ContainerLogsOptions struct {
-	ShowStdout bool
-	ShowStderr bool
-	Since      string
-	Until      string
-	Timestamps bool
-	Follow     bool
-	Tail       string
-	Details    bool
-}
-
-// ContainerRemoveOptions holds parameters to remove containers.
-type ContainerRemoveOptions struct {
-	RemoveVolumes bool
-	RemoveLinks   bool
-	Force         bool
-}
-
-// ContainerStartOptions holds parameters to start containers.
-type ContainerStartOptions struct {
-	CheckpointID  string
-	CheckpointDir string
-}
-
-// CopyToContainerOptions holds information
-// about files to copy into a container
-type CopyToContainerOptions struct {
-	AllowOverwriteDirWithFile bool
-	CopyUIDGID                bool
-}
-
-// EventsOptions holds parameters to filter events with.
-type EventsOptions struct {
-	Since   string
-	Until   string
-	Filters filters.Args
-}
-
-// NetworkListOptions holds parameters to filter the list of networks with.
-type NetworkListOptions struct {
-	Filters filters.Args
-}
-
-// HijackedResponse holds connection information for a hijacked request.
-type HijackedResponse struct {
-	Conn   net.Conn
-	Reader *bufio.Reader
-}
-
-// Close closes the hijacked connection and reader.
-func (h *HijackedResponse) Close() {
-	h.Conn.Close()
-}
-
-// CloseWriter is an interface that implements structs
-// that close input streams to prevent from writing.
-type CloseWriter interface {
-	CloseWrite() error
-}
-
-// CloseWrite closes a readWriter for writing.
-func (h *HijackedResponse) CloseWrite() error {
-	if conn, ok := h.Conn.(CloseWriter); ok {
-		return conn.CloseWrite()
-	}
-	return nil
-}
-
-// ImageBuildOptions holds the information
-// necessary to build images.
-type ImageBuildOptions struct {
-	Tags           []string
-	SuppressOutput bool
-	RemoteContext  string
-	NoCache        bool
-	Remove         bool
-	ForceRemove    bool
-	PullParent     bool
-	Isolation      container.Isolation
-	CPUSetCPUs     string
-	CPUSetMems     string
-	CPUShares      int64
-	CPUQuota       int64
-	CPUPeriod      int64
-	Memory         int64
-	MemorySwap     int64
-	CgroupParent   string
-	NetworkMode    string
-	ShmSize        int64
-	Dockerfile     string
-	Ulimits        []*units.Ulimit
-	// BuildArgs needs to be a *string instead of just a string so that
-	// we can tell the difference between "" (empty string) and no value
-	// at all (nil). See the parsing of buildArgs in
-	// api/server/router/build/build_routes.go for even more info.
-	BuildArgs   map[string]*string
-	AuthConfigs map[string]AuthConfig
-	Context     io.Reader
-	Labels      map[string]string
-	// squash the resulting image's layers to the parent
-	// preserves the original image and creates a new one from the parent with all
-	// the changes applied to a single layer
-	Squash bool
-	// CacheFrom specifies images that are used for matching cache. Images
-	// specified here do not need to have a valid parent chain to match cache.
-	CacheFrom   []string
-	SecurityOpt []string
-	ExtraHosts  []string // List of extra hosts
-	Target      string
-	SessionID   string
-	Platform    string
-}
-
-// ImageBuildResponse holds information
-// returned by a server after building
-// an image.
-type ImageBuildResponse struct {
-	Body   io.ReadCloser
-	OSType string
-}
-
-// ImageCreateOptions holds information to create images.
-type ImageCreateOptions struct {
-	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
-	Platform     string // Platform is the target platform of the image if it needs to be pulled from the registry.
-}
-
-// ImageImportSource holds source information for ImageImport
-type ImageImportSource struct {
-	Source     io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
-	SourceName string    // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
-}
-
-// ImageImportOptions holds information to import images from the client host.
-type ImageImportOptions struct {
-	Tag      string   // Tag is the name to tag this image with. This attribute is deprecated.
-	Message  string   // Message is the message to tag the image with
-	Changes  []string // Changes are the raw changes to apply to this image
-	Platform string   // Platform is the target platform of the image
-}
-
-// ImageListOptions holds parameters to filter the list of images with.
-type ImageListOptions struct {
-	All     bool
-	Filters filters.Args
-}
-
-// ImageLoadResponse returns information to the client about a load process.
-type ImageLoadResponse struct {
-	// Body must be closed to avoid a resource leak
-	Body io.ReadCloser
-	JSON bool
-}
-
-// ImagePullOptions holds information to pull images.
-type ImagePullOptions struct {
-	All           bool
-	RegistryAuth  string // RegistryAuth is the base64 encoded credentials for the registry
-	PrivilegeFunc RequestPrivilegeFunc
-	Platform      string
-}
-
-// RequestPrivilegeFunc is a function interface that
-// clients can supply to retry operations after
-// getting an authorization error.
-// This function returns the registry authentication
-// header value in base 64 format, or an error
-// if the privilege request fails.
-type RequestPrivilegeFunc func() (string, error)
-
-//ImagePushOptions holds information to push images.
-type ImagePushOptions ImagePullOptions
-
-// ImageRemoveOptions holds parameters to remove images.
-type ImageRemoveOptions struct {
-	Force         bool
-	PruneChildren bool
-}
-
-// ImageSearchOptions holds parameters to search images with.
-type ImageSearchOptions struct {
-	RegistryAuth  string
-	PrivilegeFunc RequestPrivilegeFunc
-	Filters       filters.Args
-	Limit         int
-}
-
-// ResizeOptions holds parameters to resize a tty.
-// It can be used to resize container ttys and
-// exec process ttys too.
-type ResizeOptions struct {
-	Height uint
-	Width  uint
-}
-
-// NodeListOptions holds parameters to list nodes with.
-type NodeListOptions struct {
-	Filters filters.Args
-}
-
-// NodeRemoveOptions holds parameters to remove nodes with.
-type NodeRemoveOptions struct {
-	Force bool
-}
-
-// ServiceCreateOptions contains the options to use when creating a service.
-type ServiceCreateOptions struct {
-	// EncodedRegistryAuth is the encoded registry authorization credentials to
-	// use when updating the service.
-	//
-	// This field follows the format of the X-Registry-Auth header.
-	EncodedRegistryAuth string
-
-	// QueryRegistry indicates whether the service update requires
-	// contacting a registry. A registry may be contacted to retrieve
-	// the image digest and manifest, which in turn can be used to update
-	// platform or other information about the service.
-	QueryRegistry bool
-}
-
-// ServiceCreateResponse contains the information returned to a client
-// on the creation of a new service.
-type ServiceCreateResponse struct {
-	// ID is the ID of the created service.
-	ID string
-	// Warnings is a set of non-fatal warning messages to pass on to the user.
-	Warnings []string `json:",omitempty"`
-}
-
-// Values for RegistryAuthFrom in ServiceUpdateOptions
-const (
-	RegistryAuthFromSpec         = "spec"
-	RegistryAuthFromPreviousSpec = "previous-spec"
-)
-
-// ServiceUpdateOptions contains the options to be used for updating services.
-type ServiceUpdateOptions struct {
-	// EncodedRegistryAuth is the encoded registry authorization credentials to
-	// use when updating the service.
-	//
-	// This field follows the format of the X-Registry-Auth header.
-	EncodedRegistryAuth string
-
-	// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
-	// into this field. While it does open API users up to racy writes, most
-	// users may not need that level of consistency in practice.
-
-	// RegistryAuthFrom specifies where to find the registry authorization
-	// credentials if they are not given in EncodedRegistryAuth. Valid
-	// values are "spec" and "previous-spec".
-	RegistryAuthFrom string
-
-	// Rollback indicates whether a server-side rollback should be
-	// performed. When this is set, the provided spec will be ignored.
-	// The valid values are "previous" and "none". An empty value is the
-	// same as "none".
-	Rollback string
-
-	// QueryRegistry indicates whether the service update requires
-	// contacting a registry. A registry may be contacted to retrieve
-	// the image digest and manifest, which in turn can be used to update
-	// platform or other information about the service.
-	QueryRegistry bool
-}
-
-// ServiceListOptions holds parameters to list services with.
-type ServiceListOptions struct {
-	Filters filters.Args
-}
-
-// ServiceInspectOptions holds parameters related to the "service inspect"
-// operation.
-type ServiceInspectOptions struct {
-	InsertDefaults bool
-}
-
-// TaskListOptions holds parameters to list tasks with.
-type TaskListOptions struct {
-	Filters filters.Args
-}
-
-// PluginRemoveOptions holds parameters to remove plugins.
-type PluginRemoveOptions struct {
-	Force bool
-}
-
-// PluginEnableOptions holds parameters to enable plugins.
-type PluginEnableOptions struct {
-	Timeout int
-}
-
-// PluginDisableOptions holds parameters to disable plugins.
-type PluginDisableOptions struct {
-	Force bool
-}
-
-// PluginInstallOptions holds parameters to install a plugin.
-type PluginInstallOptions struct {
-	Disabled              bool
-	AcceptAllPermissions  bool
-	RegistryAuth          string // RegistryAuth is the base64 encoded credentials for the registry
-	RemoteRef             string // RemoteRef is the plugin name on the registry
-	PrivilegeFunc         RequestPrivilegeFunc
-	AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
-	Args                  []string
-}
-
-// SwarmUnlockKeyResponse contains the response for Engine API:
-// GET /swarm/unlockkey
-type SwarmUnlockKeyResponse struct {
-	// UnlockKey is the unlock key in ASCII-armored format.
-	UnlockKey string
-}
-
-// PluginCreateOptions hold all options to plugin create.
-type PluginCreateOptions struct {
-	RepoName string
-}
diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go
deleted file mode 100644
index f6537a27f21ef..0000000000000
--- a/vendor/github.com/docker/docker/api/types/configs.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-import (
-	"github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/api/types/network"
-)
-
-// configs holds structs used for internal communication between the
-// frontend (such as an http server) and the backend (such as the
-// docker daemon).
-
-// ContainerCreateConfig is the parameter set to ContainerCreate()
-type ContainerCreateConfig struct {
-	Name             string
-	Config           *container.Config
-	HostConfig       *container.HostConfig
-	NetworkingConfig *network.NetworkingConfig
-	AdjustCPUShares  bool
-}
-
-// ContainerRmConfig holds arguments for the container remove
-// operation. This struct is used to tell the backend what operations
-// to perform.
-type ContainerRmConfig struct {
-	ForceRemove, RemoveVolume, RemoveLink bool
-}
-
-// ExecConfig is a small subset of the Config struct that holds the configuration
-// for the exec feature of docker.
-type ExecConfig struct {
-	User         string   // User that will run the command
-	Privileged   bool     // Is the container in privileged mode
-	Tty          bool     // Attach standard streams to a tty.
-	AttachStdin  bool     // Attach the standard input, makes possible user interaction
-	AttachStderr bool     // Attach the standard error
-	AttachStdout bool     // Attach the standard output
-	Detach       bool     // Execute in detach mode
-	DetachKeys   string   // Escape keys for detach
-	Env          []string // Environment variables
-	WorkingDir   string   // Working directory
-	Cmd          []string // Execution commands and args
-}
-
-// PluginRmConfig holds arguments for plugin remove.
-type PluginRmConfig struct {
-	ForceRemove bool
-}
-
-// PluginEnableConfig holds arguments for plugin enable
-type PluginEnableConfig struct {
-	Timeout int
-}
-
-// PluginDisableConfig holds arguments for plugin disable.
-type PluginDisableConfig struct {
-	ForceDisable bool
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/BUILD.bazel b/vendor/github.com/docker/docker/api/types/container/BUILD.bazel
deleted file mode 100644
index c8ed3a3f3bd72..0000000000000
--- a/vendor/github.com/docker/docker/api/types/container/BUILD.bazel
+++ /dev/null
@@ -1,27 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "go_default_library",
-    srcs = [
-        "config.go",
-        "container_changes.go",
-        "container_create.go",
-        "container_top.go",
-        "container_update.go",
-        "container_wait.go",
-        "host_config.go",
-        "hostconfig_unix.go",
-        "hostconfig_windows.go",
-        "waitcondition.go",
-    ],
-    importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/container",
-    importpath = "github.com/docker/docker/api/types/container",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//vendor/github.com/docker/docker/api/types/blkiodev:go_default_library",
-        "//vendor/github.com/docker/docker/api/types/mount:go_default_library",
-        "//vendor/github.com/docker/docker/api/types/strslice:go_default_library",
-        "//vendor/github.com/docker/go-connections/nat:go_default_library",
-        "//vendor/github.com/docker/go-units:go_default_library",
-    ],
-)
diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go
deleted file mode 100644
index 89ad08c23461f..0000000000000
--- a/vendor/github.com/docker/docker/api/types/container/config.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-import (
-	"time"
-
-	"github.com/docker/docker/api/types/strslice"
-	"github.com/docker/go-connections/nat"
-)
-
-// MinimumDuration puts a minimum on user configured duration.
-// This is to prevent API error on time unit. For example, API may
-// set 3 as healthcheck interval with intention of 3 seconds, but
-// Docker interprets it as 3 nanoseconds.
-const MinimumDuration = 1 * time.Millisecond
-
-// HealthConfig holds configuration settings for the HEALTHCHECK feature.
-type HealthConfig struct {
-	// Test is the test to perform to check that the container is healthy.
-	// An empty slice means to inherit the default.
-	// The options are:
-	// {} : inherit healthcheck
-	// {"NONE"} : disable healthcheck
-	// {"CMD", args...} : exec arguments directly
-	// {"CMD-SHELL", command} : run command with system's default shell
-	Test []string `json:",omitempty"`
-
-	// Zero means to inherit. Durations are expressed as integer nanoseconds.
-	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
-	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
-	StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
-
-	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
-	// Zero means inherit.
-	Retries int `json:",omitempty"`
-}
-
-// Config contains the configuration data about a container.
-// It should hold only portable information about the container.
-// Here, "portable" means "independent from the host we are running on".
-// Non-portable information *should* appear in HostConfig.
-// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go deleted file mode 100644 index c909d6ca3e9e7..0000000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerChangeResponseItem change item in response to ContainerChanges operation -// swagger:model ContainerChangeResponseItem -type ContainerChangeResponseItem struct { - - // Kind of change - // Required: true - Kind uint8 `json:"Kind"` - - // Path to file that has changed - // Required: true - Path string `json:"Path"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go deleted file mode 100644 index 49efa0f2c093c..0000000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See 
hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerCreateCreatedBody OK response to ContainerCreate operation -// swagger:model ContainerCreateCreatedBody -type ContainerCreateCreatedBody struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go deleted file mode 100644 index ba41edcf3f842..0000000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerTopOKBody OK response to ContainerTop operation -// swagger:model ContainerTopOKBody -type ContainerTopOKBody struct { - - // Each process running in the container, where each is process is an array of values corresponding to the titles - // Required: true - Processes [][]string `json:"Processes"` - - // The ps column titles - // Required: true - Titles []string `json:"Titles"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go deleted file mode 100644 index 7630ae54cd6d3..0000000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ /dev/null @@ -1,17 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerUpdateOKBody OK response to ContainerUpdate operation -// swagger:model ContainerUpdateOKBody -type ContainerUpdateOKBody struct { - - // warnings - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go deleted file mode 100644 index 9e3910a6b42ec..0000000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ /dev/null @@ -1,29 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerWaitOKBodyError container waiting error, if any -// swagger:model ContainerWaitOKBodyError -type ContainerWaitOKBodyError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} - -// ContainerWaitOKBody OK response to ContainerWait operation -// swagger:model ContainerWaitOKBody -type ContainerWaitOKBody struct { - - // error - // Required: true - Error *ContainerWaitOKBodyError `json:"Error"` - - // Exit code of the container - // Required: 
true - StatusCode int64 `json:"StatusCode"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go deleted file mode 100644 index 4ef26fa6c878b..0000000000000 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ /dev/null @@ -1,412 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" - "github.com/docker/go-units" -) - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - -const ( - // IsolationEmpty is unspecified (same behavior as default) - IsolationEmpty = Isolation("") - // IsolationDefault is the default isolation mode on current daemon - IsolationDefault = Isolation("default") - // IsolationProcess is process isolation mode - IsolationProcess = Isolation("process") - // IsolationHyperV is HyperV isolation mode - IsolationHyperV = Isolation("hyperv") -) - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. -func (n IpcMode) IsPrivate() bool { - return n == "private" -} - -// IsHost indicates whether the container shares the host's ipc namespace. -func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsShareable indicates whether the container's ipc namespace can be shared with another container. -func (n IpcMode) IsShareable() bool { - return n == "shareable" -} - -// IsContainer indicates whether the container uses another container's ipc namespace. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container IpcMode is set to "none". -func (n IpcMode) IsNone() bool { - return n == "none" -} - -// IsEmpty indicates whether container IpcMode is empty -func (n IpcMode) IsEmpty() bool { - return n == "" -} - -// Valid indicates whether the ipc mode is valid. -func (n IpcMode) Valid() bool { - return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == "container" { - return parts[1] - } - return "" -} - -// NetworkMode represents the container network stack. -type NetworkMode string - -// IsNone indicates whether container isn't using a network stack. 
-func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - parts := strings.SplitN(string(c), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - return c.IsContainer() || c == "" -} - -// Container returns the name of the container whose cgroup will be used. -func (c CgroupSpec) Container() string { - parts := strings.SplitN(string(c), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the pid namespace is valid. 
-func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// LogMode is a type to define the available modes for logging -// These modes affect how logs are handled when log messages start piling up. -type LogMode string - -// Available logging modes -const ( - LogModeUnset = "" - LogModeBlocking LogMode = "blocking" - LogModeNonBlock LogMode = "non-blocking" -) - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. 
other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period - CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DeviceCgroupRules []string // List of rule to be added to the device cgroup - DiskQuota int64 // Disk limit (in bytes) - KernelMemory int64 // Kernel memory limit (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. 
-type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - Runtime string `json:",omitempty"` // Runtime to use with this container - - // Applicable to Windows - ConsoleSize [2]uint // Initial console size (height,width) - Isolation Isolation // Isolation technology of the container (e.g. 
default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources - - // Mounts specs used by the container - Mounts []mount.Mount `json:",omitempty"` - - // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) - MaskedPaths []string - - // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) - ReadonlyPaths []string - - // Run a custom init inside the container, if null, use the daemon's configured settings - Init *bool `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go deleted file mode 100644 index cf6fdf44026ca..0000000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !windows - -package container // import "github.com/docker/docker/api/types/container" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go deleted file mode 100644 index 99f803a5bb170..0000000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. 
-func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsContainer() { - return "container" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go deleted file mode 100644 index cd8311f99cfb1..0000000000000 --- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// WaitCondition is a type used to specify a container state for which -// to wait. -type WaitCondition string - -// Possible WaitCondition Values. -// -// WaitConditionNotRunning (default) is used to wait for any of the non-running -// states: "created", "exited", "dead", "removing", or "removed". -// -// WaitConditionNextExit is used to wait for the next time the state changes -// to a non-running state. If the state is currently "created" or "exited", -// this would cause Wait() to block until either the container runs and exits -// or is removed. -// -// WaitConditionRemoved is used to wait for the container to be removed. -const ( - WaitConditionNotRunning WaitCondition = "not-running" - WaitConditionNextExit WaitCondition = "next-exit" - WaitConditionRemoved WaitCondition = "removed" -) diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go deleted file mode 100644 index dc942d9d9efa3..0000000000000 --- a/vendor/github.com/docker/docker/api/types/error_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ErrorResponse Represents an error. -// swagger:model ErrorResponse -type ErrorResponse struct { - - // The error message. - // Required: true - Message string `json:"message"` -} diff --git a/vendor/github.com/docker/docker/api/types/filters/BUILD.bazel b/vendor/github.com/docker/docker/api/types/filters/BUILD.bazel deleted file mode 100644 index 256e84a6dee95..0000000000000 --- a/vendor/github.com/docker/docker/api/types/filters/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["parse.go"], - importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/filters", - importpath = "github.com/docker/docker/api/types/filters", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/docker/docker/api/types/versions:go_default_library"], -) diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index a41e3d8d96ad6..0000000000000 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,350 +0,0 @@ -/*Package filters provides tools for encoding a mapping of keys to a set of -multiple values. 
-*/ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "encoding/json" - "errors" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" -) - -// Args stores a mapping of keys to a set of multiple values. -type Args struct { - fields map[string]map[string]bool -} - -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string -} - -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// ParseFlag parses a key=value string and adds it to an Args. -// -// Deprecated: Use Args.Add() -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned when a filter is not in the form key=value -// -// Deprecated: this error will be removed in a future version -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam encodes the Args as args JSON encoded string -// -// Deprecated: use ToJSON -func ToParam(a Args) (string, error) { - return ToJSON(a) -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte{}, nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - buf, err := json.Marshal(a) - return string(buf), err -} - -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. 
-// -// Deprecated: Use ToJSON -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - -// FromParam decodes a JSON encoded string into Args -// -// Deprecated: use FromJSON -func FromParam(p string) (Args, error) { - return FromJSON(p) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil - } - - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, err - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} - -// UnmarshalJSON populates the Args from JSON encode bytes -func (args Args) UnmarshalJSON(raw []byte) error { - if len(raw) == 0 { - return nil - } - return json.Unmarshal(raw, &args.fields) -} - -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true - } else { - args.fields[key] = map[string]bool{value: true} - } -} - -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) - } - } -} - -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) -} - -// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. -func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] - - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for value := range fieldValues { - testKV := strings.SplitN(value, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { - return true - } - - fieldValues := args.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the values. 
-func (args Args) ExactMatch(key, source string) bool { - fieldValues, ok := args.fields[key] - //do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one value and the source -// matches exactly the value. -func (args Args) UniqueExactMatch(key, source string) bool { - fieldValues := args.fields[key] - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(args.fields[key]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one value, or the -// source has one of the values as a prefix. -func (args Args) FuzzyMatch(key, source string) bool { - if args.ExactMatch(key, source) { - return true - } - - fieldValues := args.fields[key] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Include returns true if the key exists in the mapping -// -// Deprecated: use Contains -func (args Args) Include(field string) bool { - _, ok := args.fields[field] - return ok -} - -// Contains returns true if the key exists in the mapping -func (args Args) Contains(field string) bool { - _, ok := args.fields[field] - return ok -} - -type invalidFilter string - -func (e invalidFilter) Error() string { - return "Invalid filter '" + string(e) + "'" -} - -func (invalidFilter) InvalidParameter() {} - -// Validate compared the set of accepted keys against the keys in the mapping. -// An error is returned if any mapping keys are not in the accepted set. -func (args Args) Validate(accepted map[string]bool) error { - for name := range args.fields { - if !accepted[name] { - return invalidFilter(name) - } - } - return nil -} - -// WalkValues iterates over the list of values for a key in the mapping and calls -// op() for each value. If op returns an error the iteration stops and the -// error is returned. -func (args Args) WalkValues(field string, op func(value string) error) error { - if _, ok := args.fields[field]; !ok { - return nil - } - for v := range args.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go deleted file mode 100644 index 4d9bf1c62c892..0000000000000 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ /dev/null @@ -1,17 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// GraphDriverData Information about a container's graph driver. 
-// swagger:model GraphDriverData -type GraphDriverData struct { - - // data - // Required: true - Data map[string]string `json:"Data"` - - // name - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go deleted file mode 100644 index 7592d2f8b152c..0000000000000 --- a/vendor/github.com/docker/docker/api/types/id_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse -type IDResponse struct { - - // The id of the newly created object. - // Required: true - ID string `json:"Id"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go deleted file mode 100644 index b9a65a0d8e862..0000000000000 --- a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { - - // The image ID of an image that was deleted - Deleted string `json:"Deleted,omitempty"` - - // The image ID of an image that was untagged - Untagged string `json:"Untagged,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go deleted file mode 100644 index e145b3dcfcd1a..0000000000000 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { - - // containers - // Required: true - Containers int64 `json:"Containers"` - - // created - // Required: true - Created int64 `json:"Created"` - - // Id - // Required: true - ID string `json:"Id"` - - // labels - // Required: true - Labels map[string]string `json:"Labels"` - - // parent Id - // Required: true - ParentID string `json:"ParentId"` - - // repo digests - // Required: true - RepoDigests []string `json:"RepoDigests"` - - // repo tags - // Required: true - RepoTags []string `json:"RepoTags"` - - // shared size - // Required: true - SharedSize int64 `json:"SharedSize"` - - // size - // Required: true - Size int64 `json:"Size"` - - // virtual size - // Required: true - VirtualSize int64 `json:"VirtualSize"` -} diff --git a/vendor/github.com/docker/docker/api/types/mount/BUILD.bazel b/vendor/github.com/docker/docker/api/types/mount/BUILD.bazel deleted file mode 100644 index b5bca8c90618a..0000000000000 --- a/vendor/github.com/docker/docker/api/types/mount/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["mount.go"], - importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/mount", - importpath = "github.com/docker/docker/api/types/mount", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go deleted file mode 100644 index 3fef974df8835..0000000000000 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ /dev/null @@ -1,130 +0,0 @@ -package mount // import "github.com/docker/docker/api/types/mount" - -import ( - "os" -) - -// Type represents the type of a mount. -type Type string - -// Type constants -const ( - // TypeBind is the type for mounting host dir - TypeBind Type = "bind" - // TypeVolume is the type for remote storage volumes - TypeVolume Type = "volume" - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs Type = "tmpfs" - // TypeNamedPipe is the type for mounting Windows named pipes - TypeNamedPipe Type = "npipe" -) - -// Mount represents a mount (volume). -type Mount struct { - Type Type `json:",omitempty"` - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. - // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Consistency Consistency `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` -} - -// Propagation represents the propagation of a mount. 
-type Propagation string - -const ( - // PropagationRPrivate RPRIVATE - PropagationRPrivate Propagation = "rprivate" - // PropagationPrivate PRIVATE - PropagationPrivate Propagation = "private" - // PropagationRShared RSHARED - PropagationRShared Propagation = "rshared" - // PropagationShared SHARED - PropagationShared Propagation = "shared" - // PropagationRSlave RSLAVE - PropagationRSlave Propagation = "rslave" - // PropagationSlave SLAVE - PropagationSlave Propagation = "slave" -) - -// Propagations is the list of all valid mount propagations -var Propagations = []Propagation{ - PropagationRPrivate, - PropagationPrivate, - PropagationRShared, - PropagationShared, - PropagationRSlave, - PropagationSlave, -} - -// Consistency represents the consistency requirements of a mount. -type Consistency string - -const ( - // ConsistencyFull guarantees bind mount-like consistency - ConsistencyFull Consistency = "consistent" - // ConsistencyCached mounts can cache read data and FS structure - ConsistencyCached Consistency = "cached" - // ConsistencyDelegated mounts can cache read and written data and structure - ConsistencyDelegated Consistency = "delegated" - // ConsistencyDefault provides "consistent" behavior unless overridden - ConsistencyDefault Consistency = "default" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation Propagation `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} - -// Driver represents a volume driver. -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TmpfsOptions defines options specific to mounts of type "tmpfs". -type TmpfsOptions struct { - // Size sets the size of the tmpfs, in bytes. - // - // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be converted to - // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with - // docker, uses a straight byte value. - // - // Percentages are not supported. - SizeBytes int64 `json:",omitempty"` - // Mode of the tmpfs upon creation - Mode os.FileMode `json:",omitempty"` - - // TODO(stevvooe): There are several more tmpfs flags, specified in the - // daemon, that are accepted. Only the most basic are added for now. - // - // From docker/docker/pkg/mount/flags.go: - // - // var validFlags = map[string]bool{ - // "": true, - // "size": true, X - // "mode": true, X - // "uid": true, - // "gid": true, - // "nr_inodes": true, - // "nr_blocks": true, - // "mpol": true, - // } - // - // Some of these may be straightforward to add, but others, such as - // uid/gid have implications in a clustered system. 
-} diff --git a/vendor/github.com/docker/docker/api/types/network/BUILD.bazel b/vendor/github.com/docker/docker/api/types/network/BUILD.bazel deleted file mode 100644 index 7543a6c3c0774..0000000000000 --- a/vendor/github.com/docker/docker/api/types/network/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["network.go"], - importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/network", - importpath = "github.com/docker/docker/api/types/network", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go deleted file mode 100644 index 761d0b34f2f16..0000000000000 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ /dev/null @@ -1,108 +0,0 @@ -package network // import "github.com/docker/docker/api/types/network" - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string //Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) - return &cfgCopy -} - -// PeerInfo represents one peer of an overlay network -type PeerInfo struct { - Name string - IP string -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string - DriverOpts map[string]string -} - -// Task carries the information about one backend task -type Task struct { - Name string - EndpointID string - EndpointIP string - Info map[string]string -} - -// ServiceInfo represents service parameters with the list of service's tasks -type ServiceInfo struct { - VIP string - Ports []string - LocalLBIndex int - Tasks []Task -} - -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) 
- } - return &epCopy -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} - -// ConfigReference specifies the source which provides a network's configuration -type ConfigReference struct { - Network string -} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go deleted file mode 100644 index abae48b9ab010..0000000000000 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ /dev/null @@ -1,203 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Plugin A plugin for the Engine API -// swagger:model Plugin -type Plugin struct { - - // config - // Required: true - Config PluginConfig `json:"Config"` - - // True if the plugin is running. False if the plugin is not running, only installed. - // Required: true - Enabled bool `json:"Enabled"` - - // Id - ID string `json:"Id,omitempty"` - - // name - // Required: true - Name string `json:"Name"` - - // plugin remote reference used to push/pull the plugin - PluginReference string `json:"PluginReference,omitempty"` - - // settings - // Required: true - Settings PluginSettings `json:"Settings"` -} - -// PluginConfig The config of a plugin. -// swagger:model PluginConfig -type PluginConfig struct { - - // args - // Required: true - Args PluginConfigArgs `json:"Args"` - - // description - // Required: true - Description string `json:"Description"` - - // Docker Version used to create the plugin - DockerVersion string `json:"DockerVersion,omitempty"` - - // documentation - // Required: true - Documentation string `json:"Documentation"` - - // entrypoint - // Required: true - Entrypoint []string `json:"Entrypoint"` - - // env - // Required: true - Env []PluginEnv `json:"Env"` - - // interface - // Required: true - Interface PluginConfigInterface `json:"Interface"` - - // ipc host - // Required: true - IpcHost bool `json:"IpcHost"` - - // linux - // Required: true - Linux PluginConfigLinux `json:"Linux"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` - - // network - // Required: true - Network PluginConfigNetwork `json:"Network"` - - // pid host - // Required: true - PidHost bool `json:"PidHost"` - - // propagated mount - // Required: true - PropagatedMount string `json:"PropagatedMount"` - - // user - User PluginConfigUser `json:"User,omitempty"` - - // work dir - // Required: true - WorkDir string `json:"WorkDir"` - - // rootfs - Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` -} - -// PluginConfigArgs plugin config args -// swagger:model PluginConfigArgs -type PluginConfigArgs struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value []string `json:"Value"` -} - -// PluginConfigInterface The interface between Docker and the plugin -// swagger:model PluginConfigInterface -type PluginConfigInterface struct { - - // Protocol to use for clients connecting to the plugin. 
- ProtocolScheme string `json:"ProtocolScheme,omitempty"` - - // socket - // Required: true - Socket string `json:"Socket"` - - // types - // Required: true - Types []PluginInterfaceType `json:"Types"` -} - -// PluginConfigLinux plugin config linux -// swagger:model PluginConfigLinux -type PluginConfigLinux struct { - - // allow all devices - // Required: true - AllowAllDevices bool `json:"AllowAllDevices"` - - // capabilities - // Required: true - Capabilities []string `json:"Capabilities"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` -} - -// PluginConfigNetwork plugin config network -// swagger:model PluginConfigNetwork -type PluginConfigNetwork struct { - - // type - // Required: true - Type string `json:"Type"` -} - -// PluginConfigRootfs plugin config rootfs -// swagger:model PluginConfigRootfs -type PluginConfigRootfs struct { - - // diff ids - DiffIds []string `json:"diff_ids"` - - // type - Type string `json:"type,omitempty"` -} - -// PluginConfigUser plugin config user -// swagger:model PluginConfigUser -type PluginConfigUser struct { - - // g ID - GID uint32 `json:"GID,omitempty"` - - // UID - UID uint32 `json:"UID,omitempty"` -} - -// PluginSettings Settings that can be modified by users. -// swagger:model PluginSettings -type PluginSettings struct { - - // args - // Required: true - Args []string `json:"Args"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` - - // env - // Required: true - Env []string `json:"Env"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go deleted file mode 100644 index 569901067559b..0000000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_device.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginDevice plugin device -// swagger:model PluginDevice -type PluginDevice struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // path - // Required: true - Path *string `json:"Path"` - - // settable - // Required: true - Settable []string `json:"Settable"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go deleted file mode 100644 index 32962dc2ebeab..0000000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_env.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// PluginEnv plugin env -// swagger:model PluginEnv -type PluginEnv struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value *string `json:"Value"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go deleted file mode 100644 index c82f204e87080..0000000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginInterfaceType plugin interface type -// swagger:model PluginInterfaceType -type PluginInterfaceType struct { - - // capability - // Required: true - Capability string `json:"Capability"` - - // prefix - // Required: true - Prefix string `json:"Prefix"` - - // version - // Required: true - Version string `json:"Version"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go deleted file mode 100644 index 5c031cf8b5cc0..0000000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_mount.go +++ /dev/null @@ -1,37 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginMount plugin mount -// swagger:model PluginMount -type PluginMount struct { - - // description - // Required: true - Description string `json:"Description"` - - // destination - // Required: true - Destination string `json:"Destination"` - - // name - // Required: true - Name string `json:"Name"` - - // options - // Required: true - Options []string `json:"Options"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // source - // Required: true - Source *string `json:"Source"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go deleted file mode 100644 index 60d1fb5ad8550..0000000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ /dev/null @@ -1,71 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "encoding/json" - "fmt" - "sort" -) - -// PluginsListResponse contains the response for the Engine API -type PluginsListResponse []*Plugin - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for 
PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege - -func (s PluginPrivileges) Len() int { - return len(s) -} - -func (s PluginPrivileges) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -func (s PluginPrivileges) Swap(i, j int) { - sort.Strings(s[i].Value) - sort.Strings(s[j].Value) - s[i], s[j] = s[j], s[i] -} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go deleted file mode 100644 index d91234744c6bc..0000000000000 --- a/vendor/github.com/docker/docker/api/types/port.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Port An open port on a container -// swagger:model Port -type Port struct { - - // Host IP address that the container's port is mapped to - IP string `json:"IP,omitempty"` - - // Port on the container - // Required: true - PrivatePort uint16 `json:"PrivatePort"` - - // Port exposed on the host - PublicPort uint16 `json:"PublicPort,omitempty"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/BUILD.bazel b/vendor/github.com/docker/docker/api/types/registry/BUILD.bazel deleted file mode 100644 index 84fbeffd34b1b..0000000000000 --- a/vendor/github.com/docker/docker/api/types/registry/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authenticate.go", - "registry.go", - ], - importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/registry", - importpath = "github.com/docker/docker/api/types/registry", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/opencontainers/image-spec/specs-go/v1:go_default_library"], -) diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index f0a2113e405a1..0000000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff 
--git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 8789ad3b32101..0000000000000 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,119 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -import ( - "encoding/json" - "net" - - "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ServiceConfig stores daemon registry services configuration. -type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. 
- IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor v1.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []v1.Platform -} diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go deleted file mode 100644 index 67a41e1a89e87..0000000000000 --- a/vendor/github.com/docker/docker/api/types/seccomp.go +++ /dev/null @@ -1,93 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -// Seccomp represents the config for a seccomp profile for syscall restriction. -type Seccomp struct { - DefaultAction Action `json:"defaultAction"` - // Architectures is kept to maintain backward compatibility with the old - // seccomp profile. - Architectures []Arch `json:"architectures,omitempty"` - ArchMap []Architecture `json:"archMap,omitempty"` - Syscalls []*Syscall `json:"syscalls"` -} - -// Architecture is used to represent a specific architecture -// and its sub-architectures -type Architecture struct { - Arch Arch `json:"architecture"` - SubArches []Arch `json:"subArchitectures"` -} - -// Arch used for architectures -type Arch string - -// Additional architectures permitted to be used for system calls -// By default only the native architecture of the kernel is permitted -const ( - ArchX86 Arch = "SCMP_ARCH_X86" - ArchX86_64 Arch = "SCMP_ARCH_X86_64" - ArchX32 Arch = "SCMP_ARCH_X32" - ArchARM Arch = "SCMP_ARCH_ARM" - ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" - ArchMIPS Arch = "SCMP_ARCH_MIPS" - ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" - ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" - ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" - ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" - ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" - ArchPPC Arch = "SCMP_ARCH_PPC" - ArchPPC64 Arch = "SCMP_ARCH_PPC64" - ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" - ArchS390 Arch = "SCMP_ARCH_S390" - ArchS390X Arch = "SCMP_ARCH_S390X" -) - -// Action taken upon Seccomp rule match -type Action string - -// Define actions for Seccomp rules -const ( - ActKill Action = "SCMP_ACT_KILL" - ActTrap Action = "SCMP_ACT_TRAP" - ActErrno Action = "SCMP_ACT_ERRNO" - ActTrace Action = "SCMP_ACT_TRACE" - ActAllow Action = "SCMP_ACT_ALLOW" -) - -// Operator used to match syscall arguments in Seccomp -type Operator string - -// Define operators for syscall arguments in Seccomp -const ( - OpNotEqual Operator = "SCMP_CMP_NE" - OpLessThan Operator = "SCMP_CMP_LT" - OpLessEqual Operator = "SCMP_CMP_LE" - OpEqualTo Operator = "SCMP_CMP_EQ" - OpGreaterEqual 
Operator = "SCMP_CMP_GE" - OpGreaterThan Operator = "SCMP_CMP_GT" - OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" -) - -// Arg used for matching specific syscall arguments in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"valueTwo"` - Op Operator `json:"op"` -} - -// Filter is used to conditionally apply Seccomp rules -type Filter struct { - Caps []string `json:"caps,omitempty"` - Arches []string `json:"arches,omitempty"` -} - -// Syscall is used to match a group of syscalls in Seccomp -type Syscall struct { - Name string `json:"name,omitempty"` - Names []string `json:"names,omitempty"` - Action Action `json:"action"` - Args []*Arg `json:"args"` - Comment string `json:"comment"` - Includes Filter `json:"includes"` - Excludes Filter `json:"excludes"` -} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go deleted file mode 100644 index 74ea64b1bb671..0000000000000 --- a/vendor/github.com/docker/docker/api/types/service_update_response.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ServiceUpdateResponse service update response -// swagger:model ServiceUpdateResponse -type ServiceUpdateResponse struct { - - // Optional warning messages - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go deleted file mode 100644 index 60175c0613601..0000000000000 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ /dev/null @@ -1,181 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types // import "github.com/docker/docker/api/types" - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds (Linux) - // Units: 100's of nanoseconds (Windows) - TotalUsage uint64 `json:"total_usage"` - - // Total CPU time consumed per core (Linux). Not used on Windows. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - - // Time spent by tasks of the cgroup in kernel mode (Linux). - // Time spent by all container processes in kernel mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - - // Time spent by tasks of the cgroup in user mode (Linux). - // Time spent by all container processes in user mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). 
Not populated for Hyper-V Containers - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - // CPU Usage. Linux and Windows. - CPUUsage CPUUsage `json:"cpu_usage"` - - // System Usage. Linux only. - SystemUsage uint64 `json:"system_cpu_usage,omitempty"` - - // Online CPUs. Linux only. - OnlineCPUs uint32 `json:"online_cpus,omitempty"` - - // Throttling Data. Linux only. - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -type MemoryStats struct { - // Linux Memory Stats - - // current res_counter usage for memory - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats,omitempty"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty"` - - // Windows Memory Stats - // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx - - // committed bytes - Commit uint64 `json:"commitbytes,omitempty"` - // peak committed bytes - CommitPeak uint64 `json:"commitpeakbytes,omitempty"` - // private working set - PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// StorageStats is the disk I/O stats for read/write on Windows. -type StorageStats struct { - ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` -} - -// NetworkStats aggregates the network stats of one container -type NetworkStats struct { - // Bytes received. Windows and Linux. - RxBytes uint64 `json:"rx_bytes"` - // Packets received. Windows and Linux. - RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. Note that we dont `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - RxErrors uint64 `json:"rx_errors"` - // Incoming packets dropped. Windows and Linux. 
- RxDropped uint64 `json:"rx_dropped"` - // Bytes sent. Windows and Linux. - TxBytes uint64 `json:"tx_bytes"` - // Packets sent. Windows and Linux. - TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we dont `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - TxErrors uint64 `json:"tx_errors"` - // Outgoing packets dropped. Windows and Linux. - TxDropped uint64 `json:"tx_dropped"` - // Endpoint ID. Not used on Linux. - EndpointID string `json:"endpoint_id,omitempty"` - // Instance ID. Not used on Linux. - InstanceID string `json:"instance_id,omitempty"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. - Limit uint64 `json:"limit,omitempty"` -} - -// Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { - // Common stats - Read time.Time `json:"read"` - PreRead time.Time `json:"preread"` - - // Linux specific stats, not populated on Windows. - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - - // Windows specific stats, not populated on Linux. - NumProcs uint32 `json:"num_procs"` - StorageStats StorageStats `json:"storage_stats,omitempty"` - - // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsJSON is newly used Networks -type StatsJSON struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/strslice/BUILD.bazel b/vendor/github.com/docker/docker/api/types/strslice/BUILD.bazel deleted file mode 100644 index 045bce0bb4cac..0000000000000 --- a/vendor/github.com/docker/docker/api/types/strslice/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["strslice.go"], - importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/strslice", - importpath = "github.com/docker/docker/api/types/strslice", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go deleted file mode 100644 index 82921cebc1502..0000000000000 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. 
- return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/BUILD.bazel b/vendor/github.com/docker/docker/api/types/swarm/BUILD.bazel deleted file mode 100644 index 89ac5e582ecca..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "common.go", - "config.go", - "container.go", - "network.go", - "node.go", - "runtime.go", - "secret.go", - "service.go", - "swarm.go", - "task.go", - ], - importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/swarm", - importpath = "github.com/docker/docker/api/types/swarm", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/docker/docker/api/types/container:go_default_library", - "//vendor/github.com/docker/docker/api/types/mount:go_default_library", - "//vendor/github.com/docker/docker/api/types/network:go_default_library", - "//vendor/github.com/docker/docker/api/types/swarm/runtime:go_default_library", - ], -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go deleted file mode 100644 index ef020f458bd4c..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Version represents the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// Meta is a base object inherited by most of the other once. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:"Labels"` -} - -// Driver represents a driver (network, logging, secrets backend). -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TLSInfo represents the TLS information about what CA certificate is trusted, -// and who the issuer for a TLS certificate is -type TLSInfo struct { - // TrustRoot is the trusted CA root certificate in PEM format - TrustRoot string `json:",omitempty"` - - // CertIssuer is the raw subject bytes of the issuer - CertIssuerSubject []byte `json:",omitempty"` - - // CertIssuerPublicKey is the raw public key bytes of the issuer - CertIssuerPublicKey []byte `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go deleted file mode 100644 index a1555cf43eeeb..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ /dev/null @@ -1,35 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Config represents a config. 
-type Config struct { - ID string - Meta - Spec ConfigSpec -} - -// ConfigSpec represents a config specification from a config in swarm -type ConfigSpec struct { - Annotations - Data []byte `json:",omitempty"` - - // Templating controls whether and how to evaluate the config payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// ConfigReferenceFileTarget is a file target in a config reference -type ConfigReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// ConfigReference is a reference to a config in swarm -type ConfigReference struct { - File *ConfigReferenceFileTarget - ConfigID string - ConfigName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go deleted file mode 100644 index 151211ff5a496..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ /dev/null @@ -1,74 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" -) - -// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) -// Detailed documentation is available in: -// http://man7.org/linux/man-pages/man5/resolv.conf.5.html -// `nameserver`, `search`, `options` have been supported. -// TODO: `domain` is not supported yet. -type DNSConfig struct { - // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` - // Search specifies the search list for host-name lookup - Search []string `json:",omitempty"` - // Options allows certain internal resolver variables to be modified - Options []string `json:",omitempty"` -} - -// SELinuxContext contains the SELinux labels of the container. -type SELinuxContext struct { - Disable bool - - User string - Role string - Type string - Level string -} - -// CredentialSpec for managed service account (Windows only) -type CredentialSpec struct { - File string - Registry string -} - -// Privileges defines the security options for the container. -type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext -} - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Hostname string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - Privileges *Privileges `json:",omitempty"` - Init *bool `json:",omitempty"` - StopSignal string `json:",omitempty"` - TTY bool `json:",omitempty"` - OpenStdin bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` - Healthcheck *container.HealthConfig `json:",omitempty"` - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...] 
- Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go deleted file mode 100644 index 98ef3284d1da0..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ /dev/null @@ -1,121 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "github.com/docker/docker/api/types/network" -) - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. -type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - // TargetPort is the port inside the container - TargetPort uint32 `json:",omitempty"` - // PublishedPort is the port on the swarm hosts - PublishedPort uint32 `json:",omitempty"` - // PublishMode is the mode in which port is published - PublishMode PortConfigPublishMode `json:",omitempty"` -} - -// PortConfigPublishMode represents the mode in which the port is to -// be published. -type PortConfigPublishMode string - -const ( - // PortConfigPublishModeIngress is used for ports published - // for ingress load balancing using routing mesh. - PortConfigPublishModeIngress PortConfigPublishMode = "ingress" - // PortConfigPublishModeHost is used for ports published - // for direct host level access on the host where the task is running. - PortConfigPublishModeHost PortConfigPublishMode = "host" -) - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" - // PortConfigProtocolSCTP SCTP - PortConfigProtocolSCTP PortConfigProtocol = "sctp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. 
-type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` - ConfigFrom *network.ConfigReference `json:",omitempty"` - Scope string `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` - DriverOpts map[string]string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go deleted file mode 100644 index 1e30f5fa10ddb..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ /dev/null @@ -1,115 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// Node represents a node. -type Node struct { - ID string - Meta - // Spec defines the desired state of the node as specified by the user. - // The system will honor this and will *never* modify it. - Spec NodeSpec `json:",omitempty"` - // Description encapsulates the properties of the Node as reported by the - // agent. - Description NodeDescription `json:",omitempty"` - // Status provides the current status of the node, as seen by the manager. - Status NodeStatus `json:",omitempty"` - // ManagerStatus provides the current status of the node's manager - // component, if the node is a manager. - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` - TLSInfo TLSInfo `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. 
-type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. -type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go deleted file mode 100644 index 0c77403ccff93..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ /dev/null @@ -1,27 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// RuntimeType is the type of runtime used for the TaskSpec -type RuntimeType string - -// RuntimeURL is the proto type url -type RuntimeURL string - -const ( - // RuntimeContainer is the container based runtime - RuntimeContainer RuntimeType = "container" - // RuntimePlugin is the plugin based runtime - RuntimePlugin RuntimeType = "plugin" - // RuntimeNetworkAttachment is the network attachment runtime - RuntimeNetworkAttachment RuntimeType = "attachment" - - // RuntimeURLContainer is the proto url for the container type - RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" - // RuntimeURLPlugin is the proto url for the plugin type - RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" -) - -// NetworkAttachmentSpec represents the runtime spec type for network -// attachment tasks -type NetworkAttachmentSpec struct { - ContainerID string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/BUILD.bazel b/vendor/github.com/docker/docker/api/types/swarm/runtime/BUILD.bazel deleted file mode 100644 index 6c2155de82c29..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "gen.go", - "plugin.pb.go", - ], - importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/swarm/runtime", - importpath = "github.com/docker/docker/api/types/swarm/runtime", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/gogo/protobuf/proto:go_default_library"], -) diff --git 
a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 98c2806c31dc4..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto - -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index 1fdc9b0436135..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,712 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: plugin.proto -// DO NOT EDIT! - -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ -package runtime - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Disabled { - dAtA[i] = 0x20 - i++ - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - 
dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PluginSpec) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozPlugin(x uint64) (n int) { - return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 196 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, - 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, - 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, - 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, - 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, - 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, - 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, - 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, - 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, - 0x0c, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto 
b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index 6d63b7783fd9f..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go deleted file mode 100644 index d5213ec981c3d..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ /dev/null @@ -1,36 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Secret represents a secret. -type Secret struct { - ID string - Meta - Spec SecretSpec -} - -// SecretSpec represents a secret specification from a secret in swarm -type SecretSpec struct { - Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store - - // Templating controls whether and how to evaluate the secret payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// SecretReferenceFileTarget is a file target in a secret reference -type SecretReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// SecretReference is a reference to a secret in swarm -type SecretReference struct { - File *SecretReferenceFileTarget - SecretID string - SecretName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go deleted file mode 100644 index abf192e759414..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ /dev/null @@ -1,124 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus *UpdateStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - RollbackConfig *UpdateConfig `json:",omitempty"` - - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. 
-type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" - // UpdateStateRollbackStarted is the state with a rollback in progress. - UpdateStateRollbackStarted UpdateState = "rollback_started" - // UpdateStateRollbackPaused is the state with a rollback in progress. - UpdateStateRollbackPaused UpdateState = "rollback_paused" - // UpdateStateRollbackCompleted is the state with a rollback in progress. - UpdateStateRollbackCompleted UpdateState = "rollback_completed" -) - -// UpdateStatus reports the status of a service update. -type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt *time.Time `json:",omitempty"` - CompletedAt *time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" - // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" - - // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" - // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - // Maximum number of tasks to be updated in one iteration. - // 0 means unlimited parallelism. - Parallelism uint64 - - // Amount of time between updates. - Delay time.Duration `json:",omitempty"` - - // FailureAction is the action to take when an update failures. - FailureAction string `json:",omitempty"` - - // Monitor indicates how long to monitor a task for failure after it is - // created. If the task fails by ending up in one of the states - // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, - // this counts as a failure. If it fails after Monitor, it does not - // count as a failure. If Monitor is unspecified, a default value will - // be used. - Monitor time.Duration `json:",omitempty"` - - // MaxFailureRatio is the fraction of tasks that may fail during - // an update before the failure action is invoked. Any task created by - // the current update which ends up in one of the states REJECTED, - // COMPLETED or FAILED within Monitor from its creation counts as a - // failure. The number of failures is divided by the number of tasks - // being updated, and if this fraction is greater than - // MaxFailureRatio, the failure action is invoked. - // - // If the failure action is CONTINUE, there is no effect. - // If the failure action is PAUSE, no more tasks will be updated until - // another update is started. - MaxFailureRatio float32 - - // Order indicates the order of operations when rolling out an updated - // task. Either the old task is shut down before the new task is - // started, or the new task is started before the old task is shut down. 
- Order string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go deleted file mode 100644 index 1b111d725b8f2..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ /dev/null @@ -1,217 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// ClusterInfo represents info about the cluster for outputting in "info" -// it contains the same information as "Swarm", but without the JoinTokens -type ClusterInfo struct { - ID string - Meta - Spec Spec - TLSInfo TLSInfo - RootRotationInProgress bool -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - // Worker is the join token workers may use to join the swarm. - Worker string - // Manager is the join token managers may use to join the swarm. - Manager string -} - -// Spec represents the spec of a swarm. -type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` - EncryptionConfig EncryptionConfig `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or - // node. If negative, never remove completed or failed tasks. - TaskHistoryRetentionLimit *int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an affect on new tasks. Old tasks - // will continue use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// EncryptionConfig controls at-rest encryption of data and keys. -type EncryptionConfig struct { - // AutoLockManagers specifies whether or not managers TLS keys and raft data - // should be encrypted at rest in such a way that they must be unlocked - // before the manager node starts up again. - AutoLockManagers bool -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - // SnapshotInterval is the number of log entries between snapshots. - SnapshotInterval uint64 `json:",omitempty"` - - // KeepOldSnapshots is the number of snapshots to keep beyond the - // current snapshot. - KeepOldSnapshots *uint64 `json:",omitempty"` - - // LogEntriesForSlowFollowers is the number of log entries to keep - // around to sync up slow followers after a snapshot is created. - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - - // ElectionTick is the number of ticks that a follower will wait for a message - // from the leader before becoming a candidate and starting an election. - // ElectionTick must be greater than HeartbeatTick. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - ElectionTick int - - // HeartbeatTick is the number of ticks between heartbeats. Every - // HeartbeatTick ticks, the leader will send a heartbeat to the - // followers. 
- // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - HeartbeatTick int -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - // HeartbeatPeriod defines how often agent should send heartbeats to - // dispatcher. - HeartbeatPeriod time.Duration `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - // NodeCertExpiry is the duration certificates should be issued for - NodeCertExpiry time.Duration `json:",omitempty"` - - // ExternalCAs is a list of CAs to which a manager node will make - // certificate signing requests for node certificates. - ExternalCAs []*ExternalCA `json:",omitempty"` - - // SigningCACert and SigningCAKey specify the desired signing root CA and - // root CA key for the swarm. When inspecting the cluster, the key will - // be redacted. - SigningCACert string `json:",omitempty"` - SigningCAKey string `json:",omitempty"` - - // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate ane key. - ForceRotate uint64 `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - // Protocol is the protocol used by this external CA. - Protocol ExternalCAProtocol - - // URL is the URL where the external CA can be reached. - URL string - - // Options is a set of additional key/value pairs whose interpretation - // depends on the specified CA type. - Options map[string]string `json:",omitempty"` - - // CACert specifies which root CA is used by this external CA. This certificate must - // be in PEM format. - CACert string -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - ForceNewCluster bool - Spec Spec - AutoLockManagers bool - Availability NodeAvailability -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - RemoteAddrs []string - JoinToken string // accept by secret - Availability NodeAvailability -} - -// UnlockRequest is the request used to unlock a swarm. -type UnlockRequest struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" - // LocalNodeStateLocked LOCKED - LocalNodeStateLocked LocalNodeState = "locked" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int `json:",omitempty"` - Managers int `json:",omitempty"` - - Cluster *ClusterInfo `json:",omitempty"` -} - -// Peer represents a peer. 
-type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. -type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go deleted file mode 100644 index b35605d12fd2a..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ /dev/null @@ -1,191 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/swarm/runtime" -) - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" - // TaskStateRemove REMOVE - TaskStateRemove TaskState = "remove" - // TaskStateOrphaned ORPHANED - TaskStateOrphaned TaskState = "orphaned" -) - -// Task represents a task. -type Task struct { - ID string - Meta - Annotations - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// TaskSpec represents the spec of a task. -type TaskSpec struct { - // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. - // PluginSpec is only used when the `Runtime` field is set to `plugin` - // NetworkAttachmentSpec is used if the `Runtime` field is set to - // `attachment`. - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` - NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` - - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` - - // ForceUpdate is a counter that triggers an update even if no relevant - // parameters have been changed. - ForceUpdate uint64 - - Runtime RuntimeType `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory). 
-type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) -type GenericResource struct { - NamedResourceSpec *NamedGenericResource `json:",omitempty"` - DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` -} - -// NamedGenericResource represents a "user defined" resource which is defined -// as a string. -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) -type NamedGenericResource struct { - Kind string `json:",omitempty"` - Value string `json:",omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) -type DiscreteGenericResource struct { - Kind string `json:",omitempty"` - Value int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Resources `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` - Preferences []PlacementPreference `json:",omitempty"` - - // Platforms stores all the platforms that the image can run on. - // This field is used in the platform filter for scheduling. If empty, - // then the platform filter is off, meaning there are no scheduling restrictions. - Platforms []Platform `json:",omitempty"` -} - -// PlacementPreference provides a way to make the scheduler aware of factors -// such as topology. -type PlacementPreference struct { - Spread *SpreadOver -} - -// SpreadOver is a scheduling preference that instructs the scheduler to spread -// tasks evenly over groups of nodes identified by labels. -type SpreadOver struct { - // label descriptor, such as engine.labels.az - SpreadDescriptor string -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. -type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus *ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. 
-type ContainerStatus struct { - ContainerID string - PID int - ExitCode int -} - -// PortStatus represents the port status of a task's host ports whose -// service has published host ports -type PortStatus struct { - Ports []PortConfig `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go deleted file mode 100644 index 729f4eb6c4912..0000000000000 --- a/vendor/github.com/docker/docker/api/types/types.go +++ /dev/null @@ -1,587 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "errors" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/go-connections/nat" -) - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` -} - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Os string - OsVersion string `json:",omitempty"` - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS - Metadata ImageMetadata -} - -// ImageMetadata contains engine-local data about the image -type ImageMetadata struct { - LastTagTime time.Time `json:",omitempty"` -} - -// Container contains response of Engine API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Engine API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. -type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerStats contains response of Engine API: -// GET "/stats" -type ContainerStats struct { - Body io.ReadCloser `json:"body"` - OSType string `json:"ostype"` -} - -// Ping contains response of Engine API: -// GET "/_ping" -type Ping struct { - APIVersion string - OSType string - Experimental bool -} - -// ComponentVersion describes the version information for a specific component. 
-type ComponentVersion struct { - Name string - Version string - Details map[string]string `json:",omitempty"` -} - -// Version contains response of Engine API: -// GET "/version" -type Version struct { - Platform struct{ Name string } `json:",omitempty"` - Components []ComponentVersion `json:",omitempty"` - - // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility - - Version string - APIVersion string `json:"ApiVersion"` - MinAPIVersion string `json:"MinAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. -} - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - NEventsListener int - KernelVersion string - OperatingSystem string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - GenericResources []swarm.GenericResource - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string - ClusterAdvertise string - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string -} - -// KeyValue holds a key/value pair -type KeyValue struct { - Key, Value string -} - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// SecurityOpt -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - split := strings.Split(opt, ",") - for _, s := range split { - kv := strings.SplitN(s, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("invalid security option %q", s) - } - if kv[0] == "" || kv[1] == "" { - return nil, errors.New("invalid empty security option") - } - 
if kv[0] == "name" { - secopt.Name = kv[1] - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) - } - so = append(so, secopt) - } - return so, nil -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string - // List of Log plugins registered - Log []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool -} - -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerNode stores information about the node that a container -// is running on. 
It's only available in Docker Swarm -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} - -// ContainerJSONBase contains response of Engine API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` - Name string - RestartCount int - Driver string - Platform string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. 
-type MountPoint struct { - Type mount.Type `json:",omitempty"` - Name string `json:",omitempty"` - Source string - Destination string - Driver string `json:",omitempty"` - Mode string - RW bool - Propagation mount.Propagation -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. - ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]network.ServiceInfo `json:",omitempty"` -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - // Check for networks with duplicate names. - // Network is primarily keyed based on a random ID and not on the name. - // Network name is strictly a user-friendly alias to the network - // which is uniquely identified using ID. - // And there is no guaranteed way to check for duplicates. - // Option CheckDuplicate is there to provide a best effort checking of any networks - // which has the same name but it is not guaranteed to catch all name collisions. - CheckDuplicate bool - Driver string - Scope string - EnableIPv6 bool - IPAM *network.IPAM - Internal bool - Attachable bool - Ingress bool - ConfigOnly bool - ConfigFrom *network.ConfigReference - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. 
-type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// NetworkInspectOptions holds parameters to inspect network -type NetworkInspectOptions struct { - Scope string - Verbose bool -} - -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - Path string `json:"path"` - Args []string `json:"runtimeArgs,omitempty"` -} - -// DiskUsage contains response of Engine API: -// GET "/system/df" -type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*Volume - BuilderSize int64 -} - -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" -type ContainersPruneReport struct { - ContainersDeleted []string - SpaceReclaimed uint64 -} - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune" -type VolumesPruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} - -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -type ImagesPruneReport struct { - ImagesDeleted []ImageDeleteResponseItem - SpaceReclaimed uint64 -} - -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -type BuildCachePruneReport struct { - SpaceReclaimed uint64 -} - -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" -type NetworksPruneReport struct { - NetworksDeleted []string -} - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -type SecretCreateResponse struct { - // ID is the id of the created secret. - ID string -} - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -type ConfigCreateResponse struct { - // ID is the id of the created config. - ID string -} - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. 
-type PushResult struct { - Tag string - Digest string - Size int -} - -// BuildResult contains the image id of a successful build -type BuildResult struct { - ID string -} diff --git a/vendor/github.com/docker/docker/api/types/versions/BUILD.bazel b/vendor/github.com/docker/docker/api/types/versions/BUILD.bazel deleted file mode 100644 index 893f988947d97..0000000000000 --- a/vendor/github.com/docker/docker/api/types/versions/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["compare.go"], - importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/versions", - importpath = "github.com/docker/docker/api/types/versions", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md deleted file mode 100644 index 1ef911edb0f9a..0000000000000 --- a/vendor/github.com/docker/docker/api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -## Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go deleted file mode 100644 index 8ccb0aa92ebe4..0000000000000 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ /dev/null @@ -1,62 +0,0 @@ -package versions // import "github.com/docker/docker/api/types/versions" - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. 
-func compare(v1, v2 string) int { - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go deleted file mode 100644 index b5ee96a500586..0000000000000 --- a/vendor/github.com/docker/docker/api/types/volume.go +++ /dev/null @@ -1,69 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Volume volume -// swagger:model Volume -type Volume struct { - - // Date/Time the volume was created. - CreatedAt string `json:"CreatedAt,omitempty"` - - // Name of the volume driver used by the volume. - // Required: true - Driver string `json:"Driver"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // Mount path of the volume on the host. - // Required: true - Mountpoint string `json:"Mountpoint"` - - // Name of the volume. - // Required: true - Name string `json:"Name"` - - // The driver specific options used when creating the volume. - // Required: true - Options map[string]string `json:"Options"` - - // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. - // Required: true - Scope string `json:"Scope"` - - // Low-level details about the volume, provided by the volume driver. - // Details are returned as a map with key/value pairs: - // `{"key":"value","key2":"value2"}`. - // - // The `Status` field is optional, and is omitted if the volume driver - // does not support this feature. - // - Status map[string]interface{} `json:"Status,omitempty"` - - // usage data - UsageData *VolumeUsageData `json:"UsageData,omitempty"` -} - -// VolumeUsageData Usage details about the volume. This information is used by the -// `GET /system/df` endpoint, and omitted in other endpoints. -// -// swagger:model VolumeUsageData -type VolumeUsageData struct { - - // The number of containers referencing this volume. This field - // is set to `-1` if the reference-count is not available. - // - // Required: true - RefCount int64 `json:"RefCount"` - - // Amount of disk space used by the volume (in bytes). This information - // is only available for volumes created with the `"local"` volume - // driver. 
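Aside (not part of the diff): a minimal, self-contained sketch of how the version-comparison helpers removed above behave, assuming the upstream import path "github.com/docker/docker/api/types/versions". Missing components are treated as zero, so "1.19" and "1.19.0" compare equal.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	// Component-wise numeric comparison, as implemented by compare() above.
	fmt.Println(versions.LessThan("1.19", "1.20"))               // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.20.1", "1.20")) // true
	fmt.Println(versions.Equal("1.19", "1.19.0"))                // true: missing parts default to 0
}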
For volumes created with other volume drivers, this field - // is set to `-1` ("not available") - // - // Required: true - Size int64 `json:"Size"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/BUILD.bazel b/vendor/github.com/opencontainers/image-spec/specs-go/BUILD.bazel deleted file mode 100644 index 47507a2315716..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "version.go", - "versioned.go", - ], - importmap = "k8s.io/kops/vendor/github.com/opencontainers/image-spec/specs-go", - importpath = "github.com/opencontainers/image-spec/specs-go", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/BUILD.bazel b/vendor/github.com/opencontainers/image-spec/specs-go/v1/BUILD.bazel deleted file mode 100644 index a31d50f0e9910..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotations.go", - "config.go", - "descriptor.go", - "index.go", - "layout.go", - "manifest.go", - "mediatype.go", - ], - importmap = "k8s.io/kops/vendor/github.com/opencontainers/image-spec/specs-go/v1", - importpath = "github.com/opencontainers/image-spec/specs-go/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/opencontainers/go-digest:go_default_library", - "//vendor/github.com/opencontainers/image-spec/specs-go:go_default_library", - ], -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go deleted file mode 100644 index 35d8108958ff0..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -const ( - // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). - AnnotationCreated = "org.opencontainers.image.created" - - // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). - AnnotationAuthors = "org.opencontainers.image.authors" - - // AnnotationURL is the annotation key for the URL to find more information on the image. - AnnotationURL = "org.opencontainers.image.url" - - // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. 
- AnnotationDocumentation = "org.opencontainers.image.documentation" - - // AnnotationSource is the annotation key for the URL to get source code for building the image. - AnnotationSource = "org.opencontainers.image.source" - - // AnnotationVersion is the annotation key for the version of the packaged software. - // The version MAY match a label or tag in the source code repository. - // The version MAY be Semantic versioning-compatible. - AnnotationVersion = "org.opencontainers.image.version" - - // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. - AnnotationRevision = "org.opencontainers.image.revision" - - // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. - AnnotationVendor = "org.opencontainers.image.vendor" - - // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. - AnnotationLicenses = "org.opencontainers.image.licenses" - - // AnnotationRefName is the annotation key for the name of the reference for a target. - // SHOULD only be considered valid when on descriptors on `index.json` within image layout. - AnnotationRefName = "org.opencontainers.image.ref.name" - - // AnnotationTitle is the annotation key for the human-readable title of the image. - AnnotationTitle = "org.opencontainers.image.title" - - // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. - AnnotationDescription = "org.opencontainers.image.description" -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go deleted file mode 100644 index 8475ff74196e9..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "time" - - digest "github.com/opencontainers/go-digest" -) - -// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. -type ImageConfig struct { - // User defines the username or UID which the process in the container should run as. - User string `json:"User,omitempty"` - - // ExposedPorts a set of ports to expose from a container running this image. - ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` - - // Env is a list of environment variables to be used in a container. - Env []string `json:"Env,omitempty"` - - // Entrypoint defines a list of arguments to use as the command to execute when the container starts. - Entrypoint []string `json:"Entrypoint,omitempty"` - - // Cmd defines the default arguments to the entrypoint of the container. 
- Cmd []string `json:"Cmd,omitempty"` - - // Volumes is a set of directories which should be created as data volumes in a container running this image. - Volumes map[string]struct{} `json:"Volumes,omitempty"` - - // WorkingDir sets the current working directory of the entrypoint process in the container. - WorkingDir string `json:"WorkingDir,omitempty"` - - // Labels contains arbitrary metadata for the container. - Labels map[string]string `json:"Labels,omitempty"` - - // StopSignal contains the system call signal that will be sent to the container to exit. - StopSignal string `json:"StopSignal,omitempty"` -} - -// RootFS describes a layer content addresses -type RootFS struct { - // Type is the type of the rootfs. - Type string `json:"type"` - - // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. - DiffIDs []digest.Digest `json:"diff_ids"` -} - -// History describes the history of a layer. -type History struct { - // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. - Created *time.Time `json:"created,omitempty"` - - // CreatedBy is the command which created the layer. - CreatedBy string `json:"created_by,omitempty"` - - // Author is the author of the build point. - Author string `json:"author,omitempty"` - - // Comment is a custom message set when creating the layer. - Comment string `json:"comment,omitempty"` - - // EmptyLayer is used to mark if the history item created a filesystem diff. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Image is the JSON structure which describes some basic information about the image. -// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type Image struct { - // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. - Created *time.Time `json:"created,omitempty"` - - // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. - Author string `json:"author,omitempty"` - - // Architecture is the CPU architecture which the binaries in this image are built to run on. - Architecture string `json:"architecture"` - - // OS is the name of the operating system which the image is built to run on. - OS string `json:"os"` - - // Config defines the execution parameters which should be used as a base when running a container using the image. - Config ImageConfig `json:"config,omitempty"` - - // RootFS references the layer content addresses used by the image. - RootFS RootFS `json:"rootfs"` - - // History describes the history of each layer. - History []History `json:"history,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go deleted file mode 100644 index 6e442a0853f4b..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import digest "github.com/opencontainers/go-digest" - -// Descriptor describes the disposition of targeted content. -// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype -// when marshalled to JSON. -type Descriptor struct { - // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType,omitempty"` - - // Digest is the digest of the targeted content. - Digest digest.Digest `json:"digest"` - - // Size specifies the size in bytes of the blob. - Size int64 `json:"size"` - - // URLs specifies a list of URLs from which this object MAY be downloaded - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // - // This should only be used when referring to a manifest. - Platform *Platform `json:"platform,omitempty"` -} - -// Platform describes the platform which the image in the manifest runs on. -type Platform struct { - // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64`. - Architecture string `json:"architecture"` - - // OS specifies the operating system, for example `linux` or `windows`. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` - - // Variant is an optional field specifying a variant of the CPU, for - // example `v7` to specify ARMv7 when architecture is `arm`. - Variant string `json:"variant,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go deleted file mode 100644 index 4e6c4b236237d..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import "github.com/opencontainers/image-spec/specs-go" - -// Index references manifests for various platforms. -// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. 
-type Index struct { - specs.Versioned - - // Manifests references platform specific manifests. - Manifests []Descriptor `json:"manifests"` - - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go deleted file mode 100644 index fc79e9e0d140f..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -const ( - // ImageLayoutFile is the file name of oci image layout file - ImageLayoutFile = "oci-layout" - // ImageLayoutVersion is the version of ImageLayout - ImageLayoutVersion = "1.0.0" -) - -// ImageLayout is the structure in the "oci-layout" file, found in the root -// of an OCI Image-layout directory. -type ImageLayout struct { - Version string `json:"imageLayoutVersion"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go deleted file mode 100644 index 7ff32c40ba305..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import "github.com/opencontainers/image-spec/specs-go" - -// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. -type Manifest struct { - specs.Versioned - - // Config references a configuration object for a container, by digest. - // The referenced configuration object is a JSON blob that the runtime uses to set up the container. - Config Descriptor `json:"config"` - - // Layers is an indexed list of layers referenced by the manifest. - Layers []Descriptor `json:"layers"` - - // Annotations contains arbitrary metadata for the image manifest. 
- Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go deleted file mode 100644 index bad7bb97f4734..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -const ( - // MediaTypeDescriptor specifies the media type for a content descriptor. - MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" - - // MediaTypeLayoutHeader specifies the media type for the oci-layout. - MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" - - // MediaTypeImageManifest specifies the media type for an image manifest. - MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" - - // MediaTypeImageIndex specifies the media type for an image index. - MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" - - // MediaTypeImageLayer is the media type used for layers referenced by the manifest. - MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" - - // MediaTypeImageLayerGzip is the media type used for gzipped layers - // referenced by the manifest. - MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" - - // MediaTypeImageLayerNonDistributable is the media type for layers referenced by - // the manifest but with distribution restrictions. - MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" - - // MediaTypeImageLayerNonDistributableGzip is the media type for - // gzipped layers referenced by the manifest but with distribution - // restrictions. - MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" - - // MediaTypeImageConfig specifies the media type for the image configuration. - MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go deleted file mode 100644 index f4cda6ed8d7a5..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
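Aside (not part of the diff): a minimal, self-contained sketch of how the image-spec types removed above fit together, using only fields and constants visible in this diff; the digests and sizes are placeholders.

package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// An image manifest references a config blob and a list of layer blobs,
	// each via a Descriptor (media type + digest + size).
	m := v1.Manifest{
		Versioned: specs.Versioned{SchemaVersion: 2},
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
			Digest:    digest.Digest("sha256:aaaa"), // placeholder digest
			Size:      1234,                         // placeholder size
		},
		Layers: []v1.Descriptor{{
			MediaType: v1.MediaTypeImageLayerGzip,
			Digest:    digest.Digest("sha256:bbbb"), // placeholder digest
			Size:      5678,                         // placeholder size
		}},
		Annotations: map[string]string{
			v1.AnnotationTitle: "example image", // annotation key defined above
		},
	}
	out, _ := json.MarshalIndent(m, "", "  ")
	// Serializes as the application/vnd.oci.image.manifest.v1+json structure.
	fmt.Println(string(out))
}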
- -package specs - -import "fmt" - -const ( - // VersionMajor is for an API incompatible changes - VersionMajor = 1 - // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 0 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc6-dev" -) - -// Version is the specification version that the package types support. -var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go deleted file mode 100644 index 58a1510f33e94..0000000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package specs - -// Versioned provides a struct with the manifest schemaVersion and mediaType. -// Incoming content with unknown schema version can be decoded against this -// struct to check the version. -type Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` -} diff --git a/vendor/k8s.io/api/admission/v1beta1/doc.go b/vendor/k8s.io/api/admission/v1beta1/doc.go index a26d4d45a45e3..f5135f0f30816 100644 --- a/vendor/k8s.io/api/admission/v1beta1/doc.go +++ b/vendor/k8s.io/api/admission/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=false // +groupName=admission.k8s.io + package v1beta1 // import "k8s.io/api/admission/v1beta1" diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go index d2b938e5a279c..4082082ff9422 100644 --- a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -274,24 +273,6 @@ func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1014,51 +995,14 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.AuditAnnotations == nil { m.AuditAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1068,41 +1012,80 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + 
intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AuditAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.AuditAnnotations[mapkey] = mapvalue } + m.AuditAnnotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go index 8a5d1fbbb6e0c..d29913cf52cb6 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -16,10 +16,10 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration and validatingWebhookConfiguration is for the // new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index b87f74e520767..74c467a2223f2 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
@@ -251,24 +250,6 @@ func (m *Rule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index e17b559689d9e..98e9a571a2cf6 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -88,7 +88,7 @@ message Rule { repeated string apiVersions = 2; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' means pods. // 'pods/log' means the log subresource of pods. @@ -96,10 +96,10 @@ message Rule { // 'pods/*' means all subresources of pods. // '*/scale' means all scale subresources. // '*/*' means all resources and their subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // Depending on the enclosing object, subresources might not be allowed. // Required. repeated string resources = 3; diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go index afbb3d6d3ad64..2b29efaca4aa4 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -16,10 +16,10 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io // Package v1beta1 is the v1beta1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration and validatingWebhookConfiguration is for the // new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1" diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index d6c9d958bf548..2ca3fa6524fb2 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -506,24 +505,6 @@ func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 4d55ca878a9da..1c40ae530dea2 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -66,7 +66,7 @@ message Rule { repeated string apiVersions = 2; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' means pods. // 'pods/log' means the log subresource of pods. @@ -74,10 +74,10 @@ message Rule { // 'pods/*' means all subresources of pods. // '*/scale' means all scale subresources. // '*/*' means all resources and their subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // Depending on the enclosing object, subresources might not be allowed. // Required. repeated string resources = 3; @@ -168,7 +168,7 @@ message Webhook { // object itself is a namespace, the matching is performed on // object.metadata.labels. If the object is another cluster scoped resource, // it never skips the webhook. - // + // // For example, to run the webhook on any objects whose namespace is not // associated with "runlevel" of "0" or "1"; you will set the selector as // follows: @@ -184,7 +184,7 @@ message Webhook { // } // ] // } - // + // // If instead you want to only run the webhook on any objects whose // namespace is associated with the "environment" of "prod" or "staging"; // you will set the selector as follows: @@ -200,11 +200,11 @@ message Webhook { // } // ] // } - // + // // See // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // for more examples of label selectors. - // + // // Default to the empty LabelSelector, which matches everything. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; @@ -223,47 +223,47 @@ message Webhook { // connection with the webhook message WebhookClientConfig { // `url` gives the location of the webhook, in standard URL form - // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. - // + // // The `host` should not refer to a service running in the cluster; use // the `service` field instead. The host might be resolved via external // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve // in-cluster DNS as that would be a layering violation). `host` may // also be an IP address. 
- // + // // Please note that using `localhost` or `127.0.0.1` as a `host` is // risky unless you take great care to run this webhook on all hosts // which run an apiserver which might need to make calls to this // webhook. Such installs are likely to be non-portable, i.e., not easy // to turn up in a new cluster. - // + // // The scheme must be "https"; the URL must begin with "https://". - // + // // A path is optional, and if present may be any string permissible in // a URL. You may use the path to pass an arbitrary string to the // webhook, for example, a cluster identifier. - // + // // Attempting to use a user or basic auth e.g. "user:password@" is not // allowed. Fragments ("#...") and query parameters ("?...") are not // allowed, either. - // + // // +optional optional string url = 3; // `service` is a reference to the service for this webhook. Either // `service` or `url` must be specified. - // + // // If the webhook is running within the cluster, then you should use `service`. - // + // // Port 443 will be used if it is open, otherwise it is an error. - // + // // +optional optional ServiceReference service = 1; - // `caBundle` is a PEM encoded CA bundle which will be used to validate - // the webhook's server certificate. - // Required. + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional optional bytes caBundle = 2; } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 0b948ba1df9d4..49d94ec0eb758 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -246,7 +246,7 @@ const ( // connection with the webhook type WebhookClientConfig struct { // `url` gives the location of the webhook, in standard URL form - // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. // // The `host` should not refer to a service running in the cluster; use @@ -282,12 +282,12 @@ type WebhookClientConfig struct { // Port 443 will be used if it is open, otherwise it is an error. // // +optional - Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` - // `caBundle` is a PEM encoded CA bundle which will be used to validate - // the webhook's server certificate. - // Required. - CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. 
+ // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` } // ServiceReference holds a reference to Service.legacy.k8s.io diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index aab917a4028af..e97628aab7cf6 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -114,9 +114,9 @@ func (Webhook) SwaggerDoc() map[string]string { var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", - "url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", - "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. 
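Aside (not part of the diff): a minimal, self-contained sketch of what the caBundle change above allows. With caBundle now +optional, a WebhookClientConfig can reference only the in-cluster service and let the apiserver fall back to its system trust roots; the namespace and name below are placeholders.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	cfg := admissionregistrationv1beta1.WebhookClientConfig{
		// Exactly one of Service or URL must be set; in-cluster webhooks use Service.
		Service: &admissionregistrationv1beta1.ServiceReference{
			Namespace: "webhook-system",  // placeholder
			Name:      "example-webhook", // placeholder
		},
		// CABundle omitted: previously required, now optional per this change,
		// in which case the apiserver's system trust roots are used.
	}
	fmt.Printf("%+v\n", cfg)
}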
Required.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", } func (WebhookClientConfig) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index eac6ef2a12c17..5b29f432080b9 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -1440,24 +1439,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index c8a957ac68086..fea81922f3b6b 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -280,6 +280,7 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 4431ca2c31709..68ac55bf17ab3 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -32,6 +32,8 @@ const ( ) // +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StatefulSet represents a set of pods with consistent identities. @@ -244,6 +246,8 @@ type StatefulSetList struct { } // +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Deployment enables declarative updates for Pods and ReplicaSets. @@ -279,7 +283,8 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. 
// +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -653,6 +658,8 @@ type DaemonSetList struct { } // +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicaSet ensures that a specified number of pod replicas are running at any given time. diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 85fb159ddcc8c..7e992c58469a1 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index ef9aa8e09f048..935304755409a 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
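Aside (not part of the diff): a rough sketch of what the new +patchStrategy=retainKeys marker on DeploymentSpec.Strategy is for. In a strategic merge patch, the $retainKeys directive lists the keys to keep, so switching the strategy type also drops a stale rollingUpdate block; this illustrates the directive format under that assumption rather than reproducing code from this change.

package main

import "fmt"

func main() {
	// Strategic merge patch body: only the listed keys of .spec.strategy are retained,
	// so changing the type to Recreate clears any existing rollingUpdate settings.
	patch := []byte(`{
  "spec": {
    "strategy": {
      "$retainKeys": ["type"],
      "type": "Recreate"
    }
  }
}`)
	fmt.Println(string(patch))
}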
@@ -1091,24 +1090,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2552,51 +2533,14 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2606,41 +2550,80 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue } + m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -3833,51 +3816,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3887,41 +3833,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 6f41f06bcccef..f87f39fe94e94 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -143,6 +143,7 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index d462604d79eaa..326902fd0f860 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -55,8 +55,6 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } -// +genclient -// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. @@ -323,7 +321,8 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 72d832c340793..fc1efbc90bca7 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto -// DO NOT EDIT! /* Package v1beta2 is a generated protocol buffer package. @@ -1570,24 +1569,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -6109,51 +6090,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6163,41 +6107,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index cc3656d284e95..5d11cbe8d810d 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -286,6 +286,7 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index e5525222a1ec1..e75589adc542a 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -57,8 +57,6 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } -// +genclient -// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. @@ -331,7 +329,8 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. 
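The regenerated Unmarshal hunks above (DeploymentRollback.UpdatedAnnotations and ScaleStatus.Selector) replace the old key-then-value map parsing with a loop that reads each map-entry field and dispatches on its protobuf field number, so key and value may arrive in either order and unknown fields are skipped. A minimal sketch of that pattern, written from scratch for illustration rather than copied from the generated file (readVarint and decodeMapEntry are hypothetical names, not functions in this diff):

package main

import (
	"errors"
	"fmt"
)

// readVarint decodes a base-128 varint starting at data[i] and returns
// the decoded value together with the index just past it.
func readVarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); shift < 64; shift += 7 {
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
	return 0, 0, errors.New("varint overflow")
}

// decodeMapEntry parses one serialized map<string,string> entry.
// Field 1 carries the key, field 2 the value; ordering is not assumed.
func decodeMapEntry(entry []byte) (string, string, error) {
	var key, value string
	i := 0
	for i < len(entry) {
		tag, next, err := readVarint(entry, i)
		if err != nil {
			return "", "", err
		}
		i = next
		if tag&0x7 != 2 { // both map-entry fields are length-delimited strings
			return "", "", errors.New("unexpected wire type")
		}
		length, next, err := readVarint(entry, i)
		if err != nil {
			return "", "", err
		}
		i = next
		end := i + int(length)
		if end > len(entry) {
			return "", "", errors.New("unexpected EOF")
		}
		switch tag >> 3 { // the field number decides key vs. value
		case 1:
			key = string(entry[i:end])
		case 2:
			value = string(entry[i:end])
		default:
			// any other field number is ignored; its bytes were already consumed
		}
		i = end
	}
	return key, value, nil
}

func main() {
	// 0x0a = field 1, wire type 2; 0x12 = field 2, wire type 2.
	entry := []byte{0x0a, 0x03, 'a', 'p', 'p', 0x12, 0x05, 'n', 'g', 'i', 'n', 'x'}
	k, v, err := decodeMapEntry(entry)
	fmt.Println(k, v, err) // app nginx <nil>
}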
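The +patchStrategy=retainKeys marker added to DeploymentSpec.Strategy in the v1beta1 and v1beta2 hunks changes how strategic merge patches treat that field: keys of the live object that the patch does not mention are cleared instead of merged. A hypothetical before/after comparison of the effect (plainMerge and retainKeysMerge are illustrative helpers, not Kubernetes code), showing the problem the directive is meant to prevent: a stale rollingUpdate block surviving a switch to the Recreate strategy.

package main

import "fmt"

// plainMerge keeps every live key and overlays the patch on top of it.
func plainMerge(live, patch map[string]interface{}) map[string]interface{} {
	out := map[string]interface{}{}
	for k, v := range live {
		out[k] = v
	}
	for k, v := range patch {
		out[k] = v
	}
	return out
}

// retainKeysMerge keeps only the keys that the patch itself mentions,
// which is the simplified effect the retainKeys strategy asks for.
func retainKeysMerge(live, patch map[string]interface{}) map[string]interface{} {
	_ = live // live keys missing from the patch are deliberately dropped
	out := map[string]interface{}{}
	for k, v := range patch {
		out[k] = v
	}
	return out
}

func main() {
	live := map[string]interface{}{
		"type":          "RollingUpdate",
		"rollingUpdate": map[string]interface{}{"maxUnavailable": 1},
	}
	patch := map[string]interface{}{"type": "Recreate"}

	fmt.Println(plainMerge(live, patch))      // the stale rollingUpdate block survives
	fmt.Println(retainKeysMerge(live, patch)) // map[type:Recreate]
}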
diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 627df3ab76de7..f8229ceda82f9 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/BUILD.bazel b/vendor/k8s.io/api/auditregistration/v1alpha1/BUILD.bazel new file mode 100644 index 0000000000000..16fe537e10ab0 --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/api/auditregistration/v1alpha1", + importpath = "k8s.io/api/auditregistration/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/doc.go b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go similarity index 77% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/doc.go rename to vendor/k8s.io/api/auditregistration/v1alpha1/doc.go index 589fa1a64ad6d..c0d184a99843d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/doc.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go @@ -14,5 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package scalejob is deprecated This package contains deprecated functions used to "scale" jobs in a way inconsistent with normal scaling rules -package scalejob +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=auditregistration.k8s.io + +package v1alpha1 // import "k8s.io/api/auditregistration/v1alpha1" diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go new file mode 100644 index 0000000000000..399d92b380eac --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go @@ -0,0 +1,1685 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto + + It has these top-level messages: + AuditSink + AuditSinkList + AuditSinkSpec + Policy + ServiceReference + Webhook + WebhookClientConfig + WebhookThrottleConfig +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AuditSink) Reset() { *m = AuditSink{} } +func (*AuditSink) ProtoMessage() {} +func (*AuditSink) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } +func (*AuditSinkList) ProtoMessage() {} +func (*AuditSinkList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } +func (*AuditSinkSpec) ProtoMessage() {} +func (*AuditSinkSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *WebhookThrottleConfig) Reset() { *m = WebhookThrottleConfig{} } +func (*WebhookThrottleConfig) ProtoMessage() {} +func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func init() { + proto.RegisterType((*AuditSink)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSink") + proto.RegisterType((*AuditSinkList)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkList") + proto.RegisterType((*AuditSinkSpec)(nil), 
"k8s.io.api.auditregistration.v1alpha1.AuditSinkSpec") + proto.RegisterType((*Policy)(nil), "k8s.io.api.auditregistration.v1alpha1.Policy") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.auditregistration.v1alpha1.ServiceReference") + proto.RegisterType((*Webhook)(nil), "k8s.io.api.auditregistration.v1alpha1.Webhook") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookClientConfig") + proto.RegisterType((*WebhookThrottleConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookThrottleConfig") +} +func (m *AuditSink) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSink) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSinkList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSinkSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Policy.Size())) + n4, err := m.Policy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Webhook.Size())) + n5, err := m.Webhook.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *Policy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i += copy(dAtA[i:], m.Level) + if len(m.Stages) > 0 { + for _, s := range m.Stages { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Path != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + return i, nil +} + +func (m *Webhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Throttle != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Throttle.Size())) + n6, err := m.Throttle.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) + n7, err := m.ClientConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.URL != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i += copy(dAtA[i:], *m.URL) + } + if m.Service != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) + n8, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.CABundle != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i += copy(dAtA[i:], m.CABundle) + } + return i, nil +} + +func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookThrottleConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.QPS != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) + } + if m.Burst != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Burst)) + } + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *AuditSink) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AuditSinkList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AuditSinkSpec) Size() (n int) { + var l int + _ = l + l = m.Policy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Policy) Size() (n int) { + var l int + _ = l + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Stages) > 0 { 
+ for _, s := range m.Stages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceReference) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Webhook) Size() (n int) { + var l int + _ = l + if m.Throttle != nil { + l = m.Throttle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + var l int + _ = l + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookThrottleConfig) Size() (n int) { + var l int + _ = l + if m.QPS != nil { + n += 1 + sovGenerated(uint64(*m.QPS)) + } + if m.Burst != nil { + n += 1 + sovGenerated(uint64(*m.Burst)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AuditSink) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditSink{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AuditSinkSpec", "AuditSinkSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AuditSinkList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditSinkList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "AuditSink", "AuditSink", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AuditSinkSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditSinkSpec{`, + `Policy:` + strings.Replace(strings.Replace(this.Policy.String(), "Policy", "Policy", 1), `&`, ``, 1) + `,`, + `Webhook:` + strings.Replace(strings.Replace(this.Webhook.String(), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Policy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Policy{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Stages:` + fmt.Sprintf("%v", this.Stages) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *Webhook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Webhook{`, + `Throttle:` + strings.Replace(fmt.Sprintf("%v", this.Throttle), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, + `ClientConfig:` + 
strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookThrottleConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookThrottleConfig{`, + `QPS:` + valueToStringGenerated(this.QPS) + `,`, + `Burst:` + valueToStringGenerated(this.Burst) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditSink) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSink: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSink: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditSinkList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSinkList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSinkList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, AuditSink{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSinkSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSinkSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Policy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Policy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stages = append(m.Stages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group 
for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Webhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Webhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Throttle == nil { + m.Throttle = &WebhookThrottleConfig{} + } + if err := m.Throttle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
} + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookThrottleConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookThrottleConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QPS", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.QPS = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Burst", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Burst = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 747 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0xd3, 0x48, + 0x14, 0x8e, 0x9b, 0xa4, 0x49, 0xa6, 0xe9, 0x6e, 0x77, 0xba, 0xbb, 0xca, 0x56, 0x2b, 0xa7, 0xb2, + 0xb4, 0x52, 0xa5, 0xdd, 0x8e, 0xb7, 0xa8, 0x02, 0x84, 0xb8, 0xd4, 0x3d, 0x21, 0x95, 0x52, 0x26, + 0x14, 0x04, 0x42, 0x88, 0x89, 0xf3, 0x62, 0x0f, 0x49, 0x6c, 0x63, 0x8f, 0x83, 0x7a, 0x43, 0xe2, + 0x0f, 0xf0, 0x7b, 0xb8, 0x21, 0x81, 0xd4, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0x1c, 0xf8, 0x0f, 0x9c, + 0xd0, 0x8c, 0xc7, 0x49, 0x68, 0x8a, 0x48, 0x6f, 0x33, 0xdf, 0xbc, 0xef, 0x7b, 0xdf, 0xf7, 0xde, + 0xa0, 0xfd, 0xde, 0xcd, 0x84, 0xf0, 0xd0, 0xee, 0xa5, 0x6d, 0x88, 0x03, 0x10, 0x90, 0xd8, 0x43, + 0x08, 0x3a, 0x61, 0x6c, 0xeb, 0x07, 0x16, 0x71, 0x9b, 0xa5, 0x1d, 0x2e, 0x62, 0xf0, 0x78, 0x22, + 0x62, 0x26, 0x78, 0x18, 0xd8, 0xc3, 0x2d, 0xd6, 0x8f, 0x7c, 0xb6, 0x65, 0x7b, 0x10, 0x40, 0xcc, + 0x04, 0x74, 0x48, 0x14, 0x87, 0x22, 0xc4, 0xff, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0x66, 0x68, 0x24, + 0xa7, 0xad, 0x6d, 0x7a, 0x5c, 0xf8, 0x69, 0x9b, 0xb8, 0xe1, 0xc0, 0xf6, 0x42, 0x2f, 0xb4, 0x15, + 0xbb, 0x9d, 0x76, 0xd5, 0x4d, 0x5d, 0xd4, 0x29, 0x53, 0x5d, 0xdb, 0x9e, 0x98, 0x19, 0x30, 0xd7, + 0xe7, 0x01, 0xc4, 0x47, 0x76, 0xd4, 0xf3, 0x24, 0x90, 0xd8, 0x03, 0x10, 0xcc, 0x1e, 0xce, 0x78, + 0x59, 0xb3, 0x7f, 0xc4, 0x8a, 0xd3, 0x40, 0xf0, 0x01, 0xcc, 0x10, 0xae, 0xff, 0x8c, 0x90, 0xb8, + 0x3e, 0x0c, 0xd8, 0x45, 0x9e, 0xf5, 0xd1, 0x40, 0xb5, 0x1d, 0x19, 0xb6, 0xc5, 0x83, 0x1e, 0x7e, + 0x8e, 0xaa, 0xd2, 0x51, 0x87, 0x09, 0xd6, 0x30, 0xd6, 0x8d, 0x8d, 0xa5, 0x6b, 0xff, 0x93, 0xc9, + 0x54, 0xc6, 0xc2, 0x24, 0xea, 0x79, 0x12, 0x48, 0x88, 0xac, 0x26, 0xc3, 0x2d, 0x72, 0xaf, 0xfd, + 0x02, 0x5c, 0x71, 0x17, 0x04, 0x73, 0xf0, 0xf1, 0x59, 0xb3, 0x30, 0x3a, 0x6b, 0xa2, 0x09, 0x46, + 0xc7, 0xaa, 0xf8, 0x21, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x05, 0xa5, 0xbe, 0x4d, 0xe6, 0x9a, 0x39, + 0x19, 0x3b, 0x6c, 0x45, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x6f, 0x54, 0xe9, 0x59, 0x1f, 0x0c, + 0xb4, 0x3c, 0xae, 0xda, 0xe3, 0x89, 0xc0, 0x4f, 0x67, 0xb2, 0x90, 0xf9, 0xb2, 0x48, 0xb6, 0x4a, + 0xb2, 0xa2, 0xfb, 0x54, 0x73, 0x64, 0x2a, 0xc7, 0x21, 0x2a, 0x73, 0x01, 0x83, 0xa4, 0xb1, 0xb0, + 0x5e, 0xbc, 0x30, 0xa6, 0xb9, 0x82, 0x38, 0xcb, 0x5a, 0xbc, 0x7c, 0x47, 0xca, 0xd0, 0x4c, 0xcd, + 0x7a, 0x3f, 0x1d, 0x43, 0xc6, 0xc3, 0x87, 0x68, 0x31, 0x0a, 0xfb, 0xdc, 0x3d, 0xd2, 0x21, 0x36, + 0xe7, 0xec, 0x74, 0xa0, 0x48, 0xce, 0x2f, 0xba, 0xcd, 0x62, 0x76, 0xa7, 0x5a, 0x0c, 0x3f, 0x46, + 0x95, 0x57, 0xd0, 0xf6, 0xc3, 0xb0, 0xa7, 0x57, 0x41, 0xe6, 0xd4, 
0x7d, 0x94, 0xb1, 0x9c, 0x5f, + 0xb5, 0x70, 0x45, 0x03, 0x34, 0xd7, 0xb3, 0x5c, 0xa4, 0x9b, 0xe1, 0xff, 0x50, 0xb9, 0x0f, 0x43, + 0xe8, 0x2b, 0xeb, 0x35, 0xe7, 0xcf, 0x3c, 0xf2, 0x9e, 0x04, 0xbf, 0xe6, 0x07, 0x9a, 0x15, 0xe1, + 0x7f, 0xd1, 0x62, 0x22, 0x98, 0x07, 0xd9, 0x4c, 0x6b, 0xce, 0xaa, 0xb4, 0xdd, 0x52, 0x88, 0xac, + 0x55, 0x27, 0xaa, 0x4b, 0xac, 0x37, 0x06, 0x5a, 0x69, 0x41, 0x3c, 0xe4, 0x2e, 0x50, 0xe8, 0x42, + 0x0c, 0x81, 0x0b, 0xd8, 0x46, 0xb5, 0x80, 0x0d, 0x20, 0x89, 0x98, 0x0b, 0xba, 0xe7, 0x6f, 0xba, + 0x67, 0x6d, 0x3f, 0x7f, 0xa0, 0x93, 0x1a, 0xbc, 0x8e, 0x4a, 0xf2, 0xa2, 0x46, 0x50, 0x9b, 0xfc, + 0x2b, 0x59, 0x4b, 0xd5, 0x0b, 0xfe, 0x1b, 0x95, 0x22, 0x26, 0xfc, 0x46, 0x51, 0x55, 0x54, 0xe5, + 0xeb, 0x01, 0x13, 0x3e, 0x55, 0xa8, 0xf5, 0xc5, 0x40, 0x79, 0x7e, 0xdc, 0x45, 0x55, 0xe1, 0xc7, + 0xa1, 0x10, 0x7d, 0xd0, 0xab, 0xba, 0x7d, 0xb5, 0x91, 0x3e, 0xd0, 0xec, 0xdd, 0x30, 0xe8, 0x72, + 0xcf, 0xa9, 0xcb, 0x9f, 0x97, 0x63, 0x74, 0xac, 0x8d, 0x05, 0xaa, 0xbb, 0x7d, 0x0e, 0x81, 0xc8, + 0xea, 0xf4, 0xfa, 0x6e, 0x5d, 0xad, 0xd7, 0xee, 0x94, 0x82, 0xf3, 0xbb, 0xce, 0x5d, 0x9f, 0x46, + 0xe9, 0x77, 0x5d, 0xac, 0x77, 0x06, 0x5a, 0xbd, 0x84, 0x8b, 0xff, 0x42, 0xc5, 0x34, 0xce, 0x17, + 0x5c, 0x19, 0x9d, 0x35, 0x8b, 0x87, 0x74, 0x8f, 0x4a, 0x0c, 0x3f, 0x43, 0x95, 0x24, 0xdb, 0x90, + 0xf6, 0x78, 0x63, 0x4e, 0x8f, 0x17, 0xf7, 0xea, 0x2c, 0xc9, 0x7f, 0x96, 0xa3, 0xb9, 0x28, 0xde, + 0x40, 0x55, 0x97, 0x39, 0x69, 0xd0, 0xe9, 0x83, 0x5a, 0x4f, 0x3d, 0x1b, 0xd9, 0xee, 0x4e, 0x86, + 0xd1, 0xf1, 0xab, 0xd5, 0x42, 0x7f, 0x5c, 0x3a, 0x63, 0xe9, 0xfe, 0x65, 0x94, 0x28, 0xf7, 0xc5, + 0xcc, 0xfd, 0xfd, 0x83, 0x16, 0x95, 0x18, 0x6e, 0xa2, 0x72, 0x3b, 0x8d, 0x13, 0xa1, 0xbc, 0x17, + 0x9d, 0x9a, 0xfc, 0xb7, 0x8e, 0x04, 0x68, 0x86, 0x3b, 0xe4, 0xf8, 0xdc, 0x2c, 0x9c, 0x9c, 0x9b, + 0x85, 0xd3, 0x73, 0xb3, 0xf0, 0x7a, 0x64, 0x1a, 0xc7, 0x23, 0xd3, 0x38, 0x19, 0x99, 0xc6, 0xe9, + 0xc8, 0x34, 0x3e, 0x8d, 0x4c, 0xe3, 0xed, 0x67, 0xb3, 0xf0, 0xa4, 0x9a, 0xa7, 0xfa, 0x16, 0x00, + 0x00, 0xff, 0xff, 0x55, 0x1b, 0x03, 0x56, 0xaf, 0x06, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto new file mode 100644 index 0000000000000..70801a6c515dc --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto @@ -0,0 +1,158 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.auditregistration.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". 
+option go_package = "v1alpha1"; + +// AuditSink represents a cluster level audit sink +message AuditSink { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the audit configuration spec + optional AuditSinkSpec spec = 2; +} + +// AuditSinkList is a list of AuditSink items. +message AuditSinkList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of audit configurations. + repeated AuditSink items = 2; +} + +// AuditSinkSpec holds the spec for the audit sink +message AuditSinkSpec { + // Policy defines the policy for selecting which events should be sent to the webhook + // required + optional Policy policy = 1; + + // Webhook to send events + // required + optional Webhook webhook = 2; +} + +// Policy defines the configuration of how audit events are logged +message Policy { + // The Level that all requests are recorded at. + // available options: None, Metadata, Request, RequestResponse + // required + optional string level = 1; + + // Stages is a list of stages for which events are created. + // +optional + repeated string stages = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. + // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; +} + +// Webhook holds the configuration of the webhook +message Webhook { + // Throttle holds the options for throttling the webhook + // +optional + optional WebhookThrottleConfig throttle = 1; + + // ClientConfig holds the connection parameters for the webhook + // required + optional WebhookClientConfig clientConfig = 2; +} + +// WebhookClientConfig contains the information to make a connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 1; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. 
+ // + // Port 443 will be used if it is open, otherwise it is an error. + // + // +optional + optional ServiceReference service = 2; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 3; +} + +// WebhookThrottleConfig holds the configuration for throttling events +message WebhookThrottleConfig { + // ThrottleQPS maximum number of batches per second + // default 10 QPS + // +optional + optional int64 qps = 1; + + // ThrottleBurst is the maximum number of events sent at the same moment + // default 15 QPS + // +optional + optional int64 burst = 2; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/register.go b/vendor/k8s.io/api/auditregistration/v1alpha1/register.go similarity index 70% rename from vendor/k8s.io/kubernetes/pkg/apis/networking/v1/register.go rename to vendor/k8s.io/api/auditregistration/v1alpha1/register.go index 35a60bd8df623..d6271608f00c3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/register.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,18 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package v1alpha1 import ( - networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package -const GroupName = "networking.k8s.io" +const GroupName = "auditregistration.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -33,7 +34,8 @@ func Resource(resource string) schema.GroupResource { } var ( - localSchemeBuilder = &networkingv1.SchemeBuilder + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder AddToScheme = localSchemeBuilder.AddToScheme ) @@ -41,5 +43,14 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addDefaultingFuncs) + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AuditSink{}, + &AuditSinkList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil } diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go new file mode 100644 index 0000000000000..af31cfe275a2a --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go @@ -0,0 +1,194 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Level defines the amount of information logged during auditing +type Level string + +// Valid audit levels +const ( + // LevelNone disables auditing + LevelNone Level = "None" + // LevelMetadata provides the basic level of auditing. + LevelMetadata Level = "Metadata" + // LevelRequest provides Metadata level of auditing, and additionally + // logs the request object (does not apply for non-resource requests). + LevelRequest Level = "Request" + // LevelRequestResponse provides Request level of auditing, and additionally + // logs the response object (does not apply for non-resource requests and watches). + LevelRequestResponse Level = "RequestResponse" +) + +// Stage defines the stages in request handling during which audit events may be generated. +type Stage string + +// Valid audit stages. +const ( + // The stage for events generated after the audit handler receives the request, but before it + // is delegated down the handler chain. + StageRequestReceived = "RequestReceived" + // The stage for events generated after the response headers are sent, but before the response body + // is sent. This stage is only generated for long-running requests (e.g. watch). + StageResponseStarted = "ResponseStarted" + // The stage for events generated after the response body has been completed, and no more bytes + // will be sent. + StageResponseComplete = "ResponseComplete" + // The stage for events generated when a panic occurred. + StagePanic = "Panic" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuditSink represents a cluster level audit sink +type AuditSink struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the audit configuration spec + Spec AuditSinkSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// AuditSinkSpec holds the spec for the audit sink +type AuditSinkSpec struct { + // Policy defines the policy for selecting which events should be sent to the webhook + // required + Policy Policy `json:"policy" protobuf:"bytes,1,opt,name=policy"` + + // Webhook to send events + // required + Webhook Webhook `json:"webhook" protobuf:"bytes,2,opt,name=webhook"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuditSinkList is a list of AuditSink items. +type AuditSinkList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of audit configurations. + Items []AuditSink `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Policy defines the configuration of how audit events are logged +type Policy struct { + // The Level that all requests are recorded at. 
+ // available options: None, Metadata, Request, RequestResponse + // required + Level Level `json:"level" protobuf:"bytes,1,opt,name=level"` + + // Stages is a list of stages for which events are created. + // +optional + Stages []Stage `json:"stages" protobuf:"bytes,2,opt,name=stages"` +} + +// Webhook holds the configuration of the webhook +type Webhook struct { + // Throttle holds the options for throttling the webhook + // +optional + Throttle *WebhookThrottleConfig `json:"throttle,omitempty" protobuf:"bytes,1,opt,name=throttle"` + + // ClientConfig holds the connection parameters for the webhook + // required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` +} + +// WebhookThrottleConfig holds the configuration for throttling events +type WebhookThrottleConfig struct { + // ThrottleQPS maximum number of batches per second + // default 10 QPS + // +optional + QPS *int64 `json:"qps,omitempty" protobuf:"bytes,1,opt,name=qps"` + + // ThrottleBurst is the maximum number of events sent at the same moment + // default 15 QPS + // +optional + Burst *int64 `json:"burst,omitempty" protobuf:"bytes,2,opt,name=burst"` +} + +// WebhookClientConfig contains the information to make a connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // Port 443 will be used if it is open, otherwise it is an error. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,3,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. 
+ // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` +} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000000..edd608f3b26e8 --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,110 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AuditSink = map[string]string{ + "": "AuditSink represents a cluster level audit sink", + "spec": "Spec defines the audit configuration spec", +} + +func (AuditSink) SwaggerDoc() map[string]string { + return map_AuditSink +} + +var map_AuditSinkList = map[string]string{ + "": "AuditSinkList is a list of AuditSink items.", + "items": "List of audit configurations.", +} + +func (AuditSinkList) SwaggerDoc() map[string]string { + return map_AuditSinkList +} + +var map_AuditSinkSpec = map[string]string{ + "": "AuditSinkSpec holds the spec for the audit sink", + "policy": "Policy defines the policy for selecting which events should be sent to the webhook required", + "webhook": "Webhook to send events required", +} + +func (AuditSinkSpec) SwaggerDoc() map[string]string { + return map_AuditSinkSpec +} + +var map_Policy = map[string]string{ + "": "Policy defines the configuration of how audit events are logged", + "level": "The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required", + "stages": "Stages is a list of stages for which events are created.", +} + +func (Policy) SwaggerDoc() map[string]string { + return map_Policy +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. 
Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_Webhook = map[string]string{ + "": "Webhook holds the configuration of the webhook", + "throttle": "Throttle holds the options for throttling the webhook", + "clientConfig": "ClientConfig holds the connection parameters for the webhook required", +} + +func (Webhook) SwaggerDoc() map[string]string { + return map_Webhook +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +var map_WebhookThrottleConfig = map[string]string{ + "": "WebhookThrottleConfig holds the configuration for throttling events", + "qps": "ThrottleQPS maximum number of batches per second default 10 QPS", + "burst": "ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS", +} + +func (WebhookThrottleConfig) SwaggerDoc() map[string]string { + return map_WebhookThrottleConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..e71deffad3714 --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,224 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSink) DeepCopyInto(out *AuditSink) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSink. +func (in *AuditSink) DeepCopy() *AuditSink { + if in == nil { + return nil + } + out := new(AuditSink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuditSink) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AuditSink, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkList. +func (in *AuditSinkList) DeepCopy() *AuditSinkList { + if in == nil { + return nil + } + out := new(AuditSinkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuditSinkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSinkSpec) DeepCopyInto(out *AuditSinkSpec) { + *out = *in + in.Policy.DeepCopyInto(&out.Policy) + in.Webhook.DeepCopyInto(&out.Webhook) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkSpec. +func (in *AuditSinkSpec) DeepCopy() *AuditSinkSpec { + if in == nil { + return nil + } + out := new(AuditSinkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + if in.Throttle != nil { + in, out := &in.Throttle, &out.Throttle + *out = new(WebhookThrottleConfig) + (*in).DeepCopyInto(*out) + } + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookThrottleConfig) DeepCopyInto(out *WebhookThrottleConfig) { + *out = *in + if in.QPS != nil { + in, out := &in.QPS, &out.QPS + *out = new(int64) + **out = **in + } + if in.Burst != nil { + in, out := &in.Burst, &out.Burst + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookThrottleConfig. +func (in *WebhookThrottleConfig) DeepCopy() *WebhookThrottleConfig { + if in == nil { + return nil + } + out := new(WebhookThrottleConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index 2d2ed2ee82168..193f154abe98f 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true + package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 2ce2e2d78e006..4e7f28d8c9745 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -356,6 +355,21 @@ func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) i += copy(dAtA[i:], m.Token) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -394,6 +408,21 @@ func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -469,24 +498,6 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -580,6 +591,12 @@ func (m *TokenReviewSpec) Size() (n int) { _ = l l = len(m.Token) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -591,6 +608,12 @@ func (m *TokenReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -698,6 +721,7 @@ func (this *TokenReviewSpec) String() string { } s := strings.Join([]string{`&TokenReviewSpec{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -710,6 +734,7 @@ func (this *TokenReviewStatus) String() string { `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -1569,6 +1594,35 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1698,6 +1752,35 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1861,51 +1944,14 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1915,46 +1961,85 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 
{ + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -2087,61 +2172,62 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 892 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0xf3, 0x63, 0xb5, 0x99, 0x74, 0x97, 0xdd, 0x29, 0x95, 0xa2, 0x05, 0xec, 0x60, 0x24, - 0x14, 0x01, 0xb5, 0x9b, 0x08, 0x95, 0xaa, 0x48, 0x48, 0x6b, 0x36, 0x82, 0x08, 0x41, 0xab, 0x69, - 0x77, 0x41, 0x9c, 0x98, 0xd8, 0x6f, 0xb3, 0x26, 0x78, 0x6c, 0xec, 0x71, 0x68, 0x6e, 0xfd, 0x13, - 0x38, 0x82, 0xc4, 0x81, 0x3f, 0x02, 0x89, 0x23, 0xd7, 0x3d, 0x56, 0x9c, 0x7a, 0x40, 0x11, 0x6b, - 0xfe, 0x05, 0x4e, 0x9c, 0xd0, 0x8c, 0x67, 0xe3, 0xfc, 0xd8, 0x4d, 0x73, 0xea, 0x2d, 0xf3, 0xde, - 0xf7, 0xbe, 0x79, 0xef, 0x9b, 0x2f, 0xcf, 0xa8, 0x37, 0xba, 0x97, 0x58, 0x7e, 0x68, 0x8f, 0xd2, - 0x01, 0xc4, 0x0c, 0x38, 0x24, 0xf6, 0x18, 0x98, 0x17, 0xc6, 0xb6, 0x4a, 0xd0, 0xc8, 0xb7, 0x69, - 0xca, 0xcf, 0x80, 0x71, 0xdf, 0xa5, 0xdc, 0x0f, 0x99, 0x3d, 0xee, 0xd8, 0x43, 0x60, 0x10, 0x53, - 0x0e, 0x9e, 0x15, 0xc5, 0x21, 0x0f, 0xf1, 0xeb, 0x39, 0xda, 0xa2, 0x91, 0x6f, 0x2d, 0xa2, 0xad, - 0x71, 0xe7, 0xe0, 0xf6, 0xd0, 0xe7, 0x67, 0xe9, 0xc0, 0x72, 0xc3, 0xc0, 0x1e, 0x86, 0xc3, 0xd0, - 0x96, 0x45, 0x83, 0xf4, 0x54, 0x9e, 0xe4, 0x41, 0xfe, 0xca, 0xc9, 0x0e, 0xde, 0x2f, 0xae, 0x0e, - 0xa8, 0x7b, 0xe6, 0x33, 0x88, 0x27, 0x76, 0x34, 0x1a, 0x8a, 0x40, 0x62, 0x07, 0xc0, 0xe9, 0x15, - 0x2d, 0x1c, 0xd8, 0xd7, 0x55, 0xc5, 0x29, 0xe3, 0x7e, 0x00, 0x2b, 0x05, 0x77, 0x5f, 0x54, 0x90, - 0xb8, 0x67, 0x10, 0xd0, 0xe5, 0x3a, 0xf3, 0x4f, 0x0d, 0xbd, 0xea, 0x84, 0x29, 0xf3, 0x1e, 0x0c, - 0xbe, 0x05, 0x97, 0x13, 0x38, 0x85, 0x18, 0x98, 0x0b, 0xb8, 0x85, 0xaa, 0x23, 0x9f, 0x79, 0x4d, - 0xad, 0xa5, 0xb5, 0xeb, 0xce, 0x8d, 0xf3, 0xa9, 0x51, 0xca, 0xa6, 0x46, 0xf5, 0x33, 0x9f, 0x79, - 0x44, 0x66, 0x70, 0x17, 0x21, 0xfa, 0xb0, 0x7f, 0x02, 0x71, 0xe2, 0x87, 0xac, 0x59, 0x96, 0x38, - 0xac, 0x70, 0xe8, 0x70, 0x96, 0x21, 0x73, 0x28, 0xc1, 0xca, 0x68, 0x00, 0xcd, 0xca, 0x22, 0xeb, - 0x17, 0x34, 0x00, 0x22, 0x33, 0xd8, 0x41, 0x95, 0xb4, 0x7f, 0xd4, 0xac, 0x4a, 0xc0, 0x1d, 0x05, - 0xa8, 0x1c, 0xf7, 0x8f, 0xfe, 0x9b, 0x1a, 0x6f, 0x5e, 0x37, 
0x24, 0x9f, 0x44, 0x90, 0x58, 0xc7, - 0xfd, 0x23, 0x22, 0x8a, 0xcd, 0x0f, 0x10, 0xea, 0x3d, 0xe1, 0x31, 0x3d, 0xa1, 0xdf, 0xa5, 0x80, - 0x0d, 0x54, 0xf3, 0x39, 0x04, 0x49, 0x53, 0x6b, 0x55, 0xda, 0x75, 0xa7, 0x9e, 0x4d, 0x8d, 0x5a, - 0x5f, 0x04, 0x48, 0x1e, 0xbf, 0xbf, 0xfd, 0xd3, 0xaf, 0x46, 0xe9, 0xe9, 0x5f, 0xad, 0x92, 0xf9, - 0x4b, 0x19, 0xdd, 0x78, 0x1c, 0x8e, 0x80, 0x11, 0xf8, 0x3e, 0x85, 0x84, 0xe3, 0x6f, 0xd0, 0xb6, - 0x78, 0x22, 0x8f, 0x72, 0x2a, 0x95, 0x68, 0x74, 0xef, 0x58, 0x85, 0x3b, 0x66, 0x4d, 0x58, 0xd1, - 0x68, 0x28, 0x02, 0x89, 0x25, 0xd0, 0xd6, 0xb8, 0x63, 0xe5, 0x72, 0x7e, 0x0e, 0x9c, 0x16, 0x9a, - 0x14, 0x31, 0x32, 0x63, 0xc5, 0x0f, 0x51, 0x35, 0x89, 0xc0, 0x95, 0xfa, 0x35, 0xba, 0x96, 0xb5, - 0xce, 0x7b, 0xd6, 0x7c, 0x6f, 0x8f, 0x22, 0x70, 0x0b, 0x05, 0xc5, 0x89, 0x48, 0x26, 0xfc, 0x15, - 0xda, 0x4a, 0x38, 0xe5, 0x69, 0x22, 0x55, 0x5e, 0xec, 0xf8, 0x45, 0x9c, 0xb2, 0xce, 0xd9, 0x55, - 0xac, 0x5b, 0xf9, 0x99, 0x28, 0x3e, 0xf3, 0x5f, 0x0d, 0xed, 0x2d, 0xb7, 0x80, 0xdf, 0x45, 0x75, - 0x9a, 0x7a, 0xbe, 0x30, 0xcd, 0xa5, 0xc4, 0x3b, 0xd9, 0xd4, 0xa8, 0x1f, 0x5e, 0x06, 0x49, 0x91, - 0xc7, 0x0c, 0xed, 0x0e, 0x16, 0xdc, 0xa6, 0x7a, 0xec, 0xae, 0xef, 0xf1, 0x2a, 0x87, 0x3a, 0x38, - 0x9b, 0x1a, 0xbb, 0x8b, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x46, 0xfb, 0xf0, 0x24, 0xf2, 0x63, 0xc9, - 0xf4, 0x08, 0xdc, 0x90, 0x79, 0x89, 0xf4, 0x56, 0xc5, 0xb9, 0x95, 0x4d, 0x8d, 0xfd, 0xde, 0x72, - 0x92, 0xac, 0xe2, 0xcd, 0xdf, 0x34, 0x84, 0x57, 0x55, 0xc2, 0x6f, 0xa1, 0x1a, 0x17, 0x51, 0xf5, - 0x17, 0xd9, 0x51, 0xa2, 0xd5, 0x72, 0x68, 0x9e, 0xc3, 0x13, 0x74, 0xb3, 0x20, 0x7c, 0xec, 0x07, - 0x90, 0x70, 0x1a, 0x44, 0xea, 0xb5, 0xdf, 0xd9, 0xcc, 0x4b, 0xa2, 0xcc, 0x79, 0x4d, 0xd1, 0xdf, - 0xec, 0xad, 0xd2, 0x91, 0xab, 0xee, 0x30, 0x7f, 0x2e, 0xa3, 0x86, 0x6a, 0x7b, 0xec, 0xc3, 0x0f, - 0x2f, 0xc1, 0xcb, 0x0f, 0x16, 0xbc, 0x7c, 0x7b, 0x23, 0xdf, 0x89, 0xd6, 0xae, 0xb5, 0xf2, 0x97, - 0x4b, 0x56, 0xb6, 0x37, 0xa7, 0x5c, 0xef, 0xe4, 0xbb, 0xe8, 0x95, 0xa5, 0xfb, 0x37, 0x7a, 0x4e, - 0xf3, 0x0f, 0x0d, 0xed, 0xaf, 0xdc, 0x82, 0x3f, 0x44, 0x3b, 0x73, 0xcd, 0x40, 0xbe, 0x34, 0xb7, - 0x9d, 0x5b, 0x8a, 0x62, 0xe7, 0x70, 0x3e, 0x49, 0x16, 0xb1, 0xf8, 0x53, 0x54, 0x4d, 0x13, 0x88, - 0x95, 0x68, 0x6f, 0xaf, 0x9f, 0xf0, 0x38, 0x81, 0xb8, 0xcf, 0x4e, 0xc3, 0x42, 0x2d, 0x11, 0x21, - 0x92, 0x41, 0x4c, 0x00, 0x71, 0x1c, 0xc6, 0x6a, 0xbb, 0xce, 0x26, 0xe8, 0x89, 0x20, 0xc9, 0x73, - 0xe6, 0xef, 0x65, 0xb4, 0x7d, 0xc9, 0x82, 0xdf, 0x43, 0xdb, 0xa2, 0x52, 0xae, 0xe4, 0x7c, 0xec, - 0x3d, 0x55, 0x24, 0x31, 0x22, 0x4e, 0x66, 0x08, 0xfc, 0x06, 0xaa, 0xa4, 0xbe, 0xa7, 0x36, 0x7d, - 0x63, 0x6e, 0x35, 0x13, 0x11, 0xc7, 0x26, 0xda, 0x1a, 0xc6, 0x61, 0x1a, 0x89, 0xc7, 0x12, 0x5b, - 0x00, 0x09, 0xdd, 0x3f, 0x91, 0x11, 0xa2, 0x32, 0xf8, 0x04, 0xd5, 0x40, 0x6c, 0xe6, 0x66, 0xb5, - 0x55, 0x69, 0x37, 0xba, 0x9d, 0xcd, 0xa6, 0xb5, 0xe4, 0x36, 0xef, 0x31, 0x1e, 0x4f, 0xe6, 0xa6, - 0x12, 0x31, 0x92, 0xd3, 0x1d, 0x0c, 0xd4, 0xc6, 0x97, 0x18, 0xbc, 0x87, 0x2a, 0x23, 0x98, 0xe4, - 0x13, 0x11, 0xf1, 0x13, 0x7f, 0x84, 0x6a, 0x63, 0xf1, 0x31, 0x50, 0x2a, 0xb7, 0xd7, 0xdf, 0x5b, - 0x7c, 0x3c, 0x48, 0x5e, 0x76, 0xbf, 0x7c, 0x4f, 0x73, 0xda, 0xe7, 0x17, 0x7a, 0xe9, 0xd9, 0x85, - 0x5e, 0x7a, 0x7e, 0xa1, 0x97, 0x9e, 0x66, 0xba, 0x76, 0x9e, 0xe9, 0xda, 0xb3, 0x4c, 0xd7, 0x9e, - 0x67, 0xba, 0xf6, 0x77, 0xa6, 0x6b, 0x3f, 0xfe, 0xa3, 0x97, 0xbe, 0x2e, 0x8f, 0x3b, 0xff, 0x07, - 0x00, 0x00, 0xff, 0xff, 0x5e, 0x8d, 0x94, 0x78, 0x88, 0x08, 0x00, 0x00, + // 900 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0xc0, 0x2e, 0x41, 0x42, + 0x15, 0xb0, 0xf6, 0x26, 0x42, 0xb0, 0x5a, 0x24, 0xa4, 0x9a, 0x46, 0x10, 0x21, 0xd8, 0xd5, 0xec, + 0xb6, 0x20, 0x4e, 0x4c, 0xec, 0xd7, 0xc4, 0x04, 0x8f, 0x8d, 0x3d, 0x0e, 0x9b, 0xdb, 0xfe, 0x09, + 0x1c, 0x41, 0xe2, 0xc0, 0x1f, 0x81, 0xc4, 0xbf, 0xd0, 0xe3, 0x8a, 0xd3, 0x1e, 0x50, 0x44, 0xcd, + 0x95, 0x23, 0x27, 0x4e, 0x68, 0xc6, 0xd3, 0x38, 0x4e, 0xda, 0x34, 0x27, 0x6e, 0x9e, 0xf7, 0xbe, + 0xf7, 0xbd, 0x37, 0xdf, 0x7c, 0x9e, 0x41, 0xbd, 0xf1, 0xfd, 0xd8, 0xf4, 0x02, 0x6b, 0x9c, 0x0c, + 0x20, 0x62, 0xc0, 0x21, 0xb6, 0x26, 0xc0, 0xdc, 0x20, 0xb2, 0x54, 0x82, 0x86, 0x9e, 0x45, 0x13, + 0x3e, 0x02, 0xc6, 0x3d, 0x87, 0x72, 0x2f, 0x60, 0xd6, 0xa4, 0x63, 0x0d, 0x81, 0x41, 0x44, 0x39, + 0xb8, 0x66, 0x18, 0x05, 0x3c, 0xc0, 0xaf, 0x66, 0x68, 0x93, 0x86, 0x9e, 0x59, 0x44, 0x9b, 0x93, + 0xce, 0xfe, 0xdd, 0xa1, 0xc7, 0x47, 0xc9, 0xc0, 0x74, 0x02, 0xdf, 0x1a, 0x06, 0xc3, 0xc0, 0x92, + 0x45, 0x83, 0xe4, 0x4c, 0xae, 0xe4, 0x42, 0x7e, 0x65, 0x64, 0xfb, 0xef, 0xe6, 0xad, 0x7d, 0xea, + 0x8c, 0x3c, 0x06, 0xd1, 0xd4, 0x0a, 0xc7, 0x43, 0x11, 0x88, 0x2d, 0x1f, 0x38, 0xbd, 0x62, 0x84, + 0x7d, 0xeb, 0xba, 0xaa, 0x28, 0x61, 0xdc, 0xf3, 0x61, 0xa5, 0xe0, 0xbd, 0x9b, 0x0a, 0x62, 0x67, + 0x04, 0x3e, 0x5d, 0xae, 0x6b, 0xff, 0xae, 0xa1, 0x97, 0xed, 0x20, 0x61, 0xee, 0xc3, 0xc1, 0x37, + 0xe0, 0x70, 0x02, 0x67, 0x10, 0x01, 0x73, 0x00, 0x1f, 0xa0, 0xea, 0xd8, 0x63, 0x6e, 0x4b, 0x3b, + 0xd0, 0x0e, 0x1b, 0xf6, 0xad, 0xf3, 0x99, 0x51, 0x4a, 0x67, 0x46, 0xf5, 0x53, 0x8f, 0xb9, 0x44, + 0x66, 0x70, 0x17, 0x21, 0xfa, 0xa8, 0x7f, 0x0a, 0x51, 0xec, 0x05, 0xac, 0x55, 0x96, 0x38, 0xac, + 0x70, 0xe8, 0x68, 0x9e, 0x21, 0x0b, 0x28, 0xc1, 0xca, 0xa8, 0x0f, 0xad, 0x4a, 0x91, 0xf5, 0x73, + 0xea, 0x03, 0x91, 0x19, 0x6c, 0xa3, 0x4a, 0xd2, 0x3f, 0x6e, 0x55, 0x25, 0xe0, 0x9e, 0x02, 0x54, + 0x4e, 0xfa, 0xc7, 0xff, 0xce, 0x8c, 0xd7, 0xaf, 0xdb, 0x24, 0x9f, 0x86, 0x10, 0x9b, 0x27, 0xfd, + 0x63, 0x22, 0x8a, 0xdb, 0xef, 0x23, 0xd4, 0x7b, 0xca, 0x23, 0x7a, 0x4a, 0xbf, 0x4d, 0x00, 0x1b, + 0xa8, 0xe6, 0x71, 0xf0, 0xe3, 0x96, 0x76, 0x50, 0x39, 0x6c, 0xd8, 0x8d, 0x74, 0x66, 0xd4, 0xfa, + 0x22, 0x40, 0xb2, 0xf8, 0x83, 0xfa, 0x8f, 0xbf, 0x18, 0xa5, 0x67, 0x7f, 0x1c, 0x94, 0xda, 0x3f, + 0x97, 0xd1, 0xad, 0x27, 0xc1, 0x18, 0x18, 0x81, 0xef, 0x12, 0x88, 0x39, 0xfe, 0x1a, 0xd5, 0xc5, + 0x11, 0xb9, 0x94, 0x53, 0xa9, 0x44, 0xb3, 0x7b, 0xcf, 0xcc, 0xdd, 0x31, 0x1f, 0xc2, 0x0c, 0xc7, + 0x43, 0x11, 0x88, 0x4d, 0x81, 0x36, 0x27, 0x1d, 0x33, 0x93, 0xf3, 0x33, 0xe0, 0x34, 0xd7, 0x24, + 0x8f, 0x91, 0x39, 0x2b, 0x7e, 0x84, 0xaa, 0x71, 0x08, 0x8e, 0xd4, 0xaf, 0xd9, 0x35, 0xcd, 0x75, + 0xde, 0x33, 0x17, 0x67, 0x7b, 0x1c, 0x82, 0x93, 0x2b, 0x28, 0x56, 0x44, 0x32, 0xe1, 0x2f, 0xd1, + 0x56, 0xcc, 0x29, 0x4f, 0x62, 0xa9, 0x72, 0x71, 0xe2, 0x9b, 0x38, 0x65, 0x9d, 0xbd, 0xa3, 0x58, + 0xb7, 0xb2, 0x35, 0x51, 0x7c, 0xed, 0x7f, 0x34, 0xb4, 0xbb, 0x3c, 0x02, 0x7e, 0x1b, 0x35, 0x68, + 0xe2, 0x7a, 0xc2, 0x34, 0x97, 0x12, 0x6f, 0xa7, 0x33, 0xa3, 0x71, 0x74, 0x19, 0x24, 0x79, 0x1e, + 0x33, 0xb4, 0x33, 0x28, 0xb8, 0x4d, 0xcd, 0xd8, 0x5d, 0x3f, 0xe3, 0x55, 0x0e, 0xb5, 0x71, 0x3a, + 0x33, 0x76, 0x8a, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x42, 0x7b, 0xf0, 0x34, 0xf4, 0x22, 0xc9, 0xf4, + 0x18, 0x9c, 0x80, 0xb9, 0xb1, 0xf4, 0x56, 0xc5, 0xbe, 0x93, 0xce, 0x8c, 0xbd, 0xde, 0x72, 0x92, + 0xac, 0xe2, 0xdb, 0xbf, 0x6a, 0x08, 0xaf, 0xaa, 0x84, 0xdf, 0x40, 0x35, 0x2e, 0xa2, 0xea, 0x17, + 0xd9, 0x56, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x53, 0x74, 0x3b, 0x27, 0x7c, 0xe2, 
0xf9, 0x10, + 0x73, 0xea, 0x87, 0xea, 0xb4, 0xdf, 0xda, 0xcc, 0x4b, 0xa2, 0xcc, 0x7e, 0x45, 0xd1, 0xdf, 0xee, + 0xad, 0xd2, 0x91, 0xab, 0x7a, 0xb4, 0x7f, 0x2a, 0xa3, 0xa6, 0x1a, 0x7b, 0xe2, 0xc1, 0xf7, 0xff, + 0x83, 0x97, 0x1f, 0x16, 0xbc, 0x7c, 0x77, 0x23, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x17, 0x4b, + 0x56, 0xb6, 0x36, 0xa7, 0x5c, 0xef, 0x64, 0x07, 0xbd, 0xb4, 0xd4, 0x7f, 0xb3, 0xe3, 0x2c, 0x98, + 0xbd, 0xbc, 0xde, 0xec, 0xed, 0xbf, 0x35, 0xb4, 0xb7, 0x32, 0x12, 0xfe, 0x00, 0x6d, 0x2f, 0x4c, + 0x0e, 0xd9, 0x0d, 0x5b, 0xb7, 0xef, 0xa8, 0x7e, 0xdb, 0x47, 0x8b, 0x49, 0x52, 0xc4, 0xe2, 0x4f, + 0x50, 0x35, 0x89, 0x21, 0x52, 0x0a, 0xbf, 0xb9, 0x5e, 0x8e, 0x93, 0x18, 0xa2, 0x3e, 0x3b, 0x0b, + 0x72, 0x69, 0x45, 0x84, 0x48, 0x06, 0xb1, 0x5d, 0x88, 0xa2, 0x20, 0x52, 0x57, 0xf1, 0x7c, 0xbb, + 0x3d, 0x11, 0x24, 0x59, 0xae, 0xb8, 0xdd, 0xea, 0x0d, 0xdb, 0xfd, 0xad, 0x8c, 0xea, 0x97, 0x2d, + 0xf1, 0x3b, 0xa8, 0x2e, 0xda, 0xc8, 0xcb, 0x3e, 0x13, 0x74, 0x57, 0x75, 0x90, 0x18, 0x11, 0x27, + 0x73, 0x04, 0x7e, 0x0d, 0x55, 0x12, 0xcf, 0x55, 0x6f, 0x48, 0x73, 0xe1, 0xd2, 0x27, 0x22, 0x8e, + 0xdb, 0x68, 0x6b, 0x18, 0x05, 0x49, 0x28, 0x6c, 0x20, 0x66, 0x40, 0xe2, 0x44, 0x3f, 0x96, 0x11, + 0xa2, 0x32, 0xf8, 0x14, 0xd5, 0x40, 0xdc, 0xf9, 0x72, 0xcc, 0x66, 0xb7, 0xb3, 0x99, 0x34, 0xa6, + 0x7c, 0x27, 0x7a, 0x8c, 0x47, 0xd3, 0x05, 0x09, 0x44, 0x8c, 0x64, 0x74, 0xfb, 0x03, 0xf5, 0x96, + 0x48, 0x0c, 0xde, 0x45, 0x95, 0x31, 0x4c, 0xb3, 0x1d, 0x11, 0xf1, 0x89, 0x3f, 0x44, 0xb5, 0x89, + 0x78, 0x66, 0xd4, 0x91, 0x1c, 0xae, 0xef, 0x9b, 0x3f, 0x4b, 0x24, 0x2b, 0x7b, 0x50, 0xbe, 0xaf, + 0xd9, 0x87, 0xe7, 0x17, 0x7a, 0xe9, 0xf9, 0x85, 0x5e, 0x7a, 0x71, 0xa1, 0x97, 0x9e, 0xa5, 0xba, + 0x76, 0x9e, 0xea, 0xda, 0xf3, 0x54, 0xd7, 0x5e, 0xa4, 0xba, 0xf6, 0x67, 0xaa, 0x6b, 0x3f, 0xfc, + 0xa5, 0x97, 0xbe, 0x2a, 0x4f, 0x3a, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x04, 0x81, 0x6f, + 0xe2, 0x08, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index 10c792171d2ab..b69636a814dd8 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -118,6 +118,14 @@ message TokenReviewSpec { // Token is the opaque bearer token. // +optional optional string token = 1; + + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + repeated string audiences = 2; } // TokenReviewStatus is the result of the token authentication request. @@ -130,6 +138,18 @@ message TokenReviewStatus { // +optional optional UserInfo user = 2; + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. 
+ // +optional + repeated string audiences = 4; + // Error indicates that the token couldn't be checked // +optional optional string error = 3; diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index 723457a3dd212..d348c6fd40546 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -64,6 +64,13 @@ type TokenReviewSpec struct { // Token is the opaque bearer token. // +optional Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } // TokenReviewStatus is the result of the token authentication request. @@ -74,6 +81,17 @@ type TokenReviewStatus struct { // User is the UserInfo associated with the provided token. // +optional User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index 6632a5dd5b20f..f2c9b95c71f18 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -79,8 +79,9 @@ func (TokenReview) SwaggerDoc() map[string]string { } var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", + "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. 
If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", } func (TokenReviewSpec) SwaggerDoc() map[string]string { @@ -91,6 +92,7 @@ var map_TokenReviewStatus = map[string]string{ "": "TokenReviewStatus is the result of the token authentication request.", "authenticated": "Authenticated indicates that the token was associated with a known user.", "user": "User is the UserInfo associated with the provided token.", + "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", "error": "Error indicates that the token couldn't be checked", } diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index f36c253b2e75e..aca99c42b765d 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -141,7 +141,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } @@ -167,6 +167,11 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -184,6 +189,11 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go index e0de315d40d0e..919f3c42fdd18 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true + package v1beta1 // import "k8s.io/api/authentication/v1beta1" diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 8503d212baae5..5f34e76a9c3e9 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto -// DO NOT EDIT! 
/* Package v1beta1 is a generated protocol buffer package. @@ -176,6 +175,21 @@ func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) i += copy(dAtA[i:], m.Token) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -214,6 +228,21 @@ func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -289,24 +318,6 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -345,6 +356,12 @@ func (m *TokenReviewSpec) Size() (n int) { _ = l l = len(m.Token) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -356,6 +373,12 @@ func (m *TokenReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -415,6 +438,7 @@ func (this *TokenReviewSpec) String() string { } s := strings.Join([]string{`&TokenReviewSpec{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -427,6 +451,7 @@ func (this *TokenReviewStatus) String() string { `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -739,6 +764,35 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -868,6 +922,35 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1031,51 +1114,14 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1085,46 +1131,85 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1257,45 +1342,47 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 635 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcf, 0x4f, 0xd4, 0x40, - 0x14, 0x6e, 0xf7, 0x07, 0xee, 0xce, 0x8a, 0xe2, 0x24, 0x26, 0x9b, 0x4d, 0xec, 0xae, 0xeb, 0x85, - 0x44, 0x99, 0x0a, 0x21, 0x48, 0xf0, 0x64, 0x95, 0x18, 0x4c, 0x88, 0xc9, 0x08, 0x1e, 0xd4, 0x83, - 0xb3, 0xdd, 0x47, 0xb7, 0xae, 0xed, 0x34, 0xd3, 0x69, 0x95, 0x1b, 0x7f, 0x82, 0x47, 0x8f, 0x26, - 0xfe, 0x25, 0x26, 0x1e, 0x38, 0x72, 0xe4, 0x60, 0x88, 0xd4, 0x7f, 0xc4, 0xcc, 0x74, 0x64, 0x17, - 0x88, 0x01, 0x6e, 0xf3, 0xbe, 0xf7, 0xbe, 0x6f, 0xde, 0xf7, 0x66, 0x1e, 0x7a, 0x31, 0x5e, 0x4d, - 0x49, 0xc8, 0xdd, 0x71, 0x36, 0x00, 0x11, 0x83, 0x84, 0xd4, 0xcd, 0x21, 0x1e, 0x72, 0xe1, 0x9a, - 0x04, 0x4b, 0x42, 0x97, 0x65, 0x72, 0x04, 0xb1, 0x0c, 0x7d, 0x26, 0x43, 0x1e, 0xbb, 0xf9, 0xe2, - 0x00, 0x24, 0x5b, 0x74, 0x03, 0x88, 0x41, 0x30, 0x09, 0x43, 0x92, 0x08, 0x2e, 0x39, 0xbe, 0x5b, - 0x52, 0x08, 0x4b, 0x42, 0x72, 0x9a, 0x42, 0x0c, 0xa5, 0xb3, 0x10, 0x84, 0x72, 0x94, 0x0d, 0x88, - 0xcf, 0x23, 0x37, 0xe0, 0x01, 0x77, 0x35, 0x73, 0x90, 0xed, 0xe8, 0x48, 0x07, 0xfa, 0x54, 0x2a, - 0x76, 0x96, 0x27, 0x4d, 0x44, 0xcc, 0x1f, 0x85, 0x31, 0x88, 0x5d, 0x37, 0x19, 0x07, 0x0a, 0x48, - 0xdd, 0x08, 0x24, 0x73, 0xf3, 0x73, 0x7d, 0x74, 0xdc, 0xff, 0xb1, 0x44, 0x16, 0xcb, 0x30, 0x82, - 0x73, 0x84, 0x95, 0x8b, 0x08, 0xa9, 0x3f, 0x82, 0x88, 0x9d, 0xe5, 0xf5, 0x1f, 0x21, 0xb4, 0xfe, - 0x59, 0x0a, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x51, 0x3d, 0x94, 0x10, 0xa5, 0x6d, 0xbb, 0x57, - 0x9d, 0x6f, 0x7a, 0xcd, 0xe2, 0xa8, 0x5b, 0xdf, 0x50, 0x00, 0x2d, 0xf1, 0xb5, 0xc6, 0xd7, 0x6f, - 0x5d, 0x6b, 0xef, 0x57, 0xcf, 0xea, 0x7f, 0xaf, 0xa0, 0xd6, 0x16, 0x1f, 0x43, 0x4c, 0x21, 0x0f, - 0xe1, 0x13, 0x7e, 0x8f, 0x1a, 0xca, 0xcc, 0x90, 0x49, 0xd6, 0xb6, 0x7b, 0xf6, 0x7c, 0x6b, 0xe9, - 0x21, 0x99, 0x0c, 0xf3, 0xa4, 0x27, 0x92, 0x8c, 0x03, 0x05, 0xa4, 0x44, 0x55, 0x93, 0x7c, 0x91, - 0xbc, 0x1c, 0x7c, 0x00, 0x5f, 0x6e, 0x82, 0x64, 0x1e, 0xde, 0x3f, 0xea, 0x5a, 0xc5, 0x51, 0x17, - 0x4d, 0x30, 0x7a, 0xa2, 0x8a, 0xb7, 0x50, 0x2d, 0x4d, 0xc0, 0x6f, 0x57, 0xb4, 0xfa, 0x12, 0xb9, - 0xf0, 0xa9, 0xc8, 0x54, 0x7f, 0xaf, 0x12, 0xf0, 0xbd, 
0xeb, 0x46, 0xbf, 0xa6, 0x22, 0xaa, 0xd5, - 0xf0, 0x3b, 0x34, 0x93, 0x4a, 0x26, 0xb3, 0xb4, 0x5d, 0xd5, 0xba, 0xcb, 0x57, 0xd4, 0xd5, 0x5c, - 0xef, 0x86, 0x51, 0x9e, 0x29, 0x63, 0x6a, 0x34, 0xfb, 0x2b, 0xe8, 0xe6, 0x99, 0x26, 0xf0, 0x3d, - 0x54, 0x97, 0x0a, 0xd2, 0x53, 0x6a, 0x7a, 0xb3, 0x86, 0x59, 0x2f, 0xeb, 0xca, 0x5c, 0xff, 0xa7, - 0x8d, 0x6e, 0x9d, 0xbb, 0x05, 0x3f, 0x46, 0xb3, 0x53, 0x1d, 0xc1, 0x50, 0x4b, 0x34, 0xbc, 0xdb, - 0x46, 0x62, 0xf6, 0xc9, 0x74, 0x92, 0x9e, 0xae, 0xc5, 0x9b, 0xa8, 0x96, 0xa5, 0x20, 0xcc, 0xf8, - 0xee, 0x5f, 0xc2, 0xe6, 0x76, 0x0a, 0x62, 0x23, 0xde, 0xe1, 0x93, 0xb9, 0x29, 0x84, 0x6a, 0x19, - 0x65, 0x03, 0x84, 0xe0, 0x42, 0x8f, 0x6d, 0xca, 0xc6, 0xba, 0x02, 0x69, 0x99, 0xeb, 0xff, 0xa8, - 0xa0, 0xc6, 0x3f, 0x15, 0xfc, 0x00, 0x35, 0x14, 0x33, 0x66, 0x11, 0x18, 0xef, 0x73, 0x86, 0xa4, - 0x6b, 0x14, 0x4e, 0x4f, 0x2a, 0xf0, 0x1d, 0x54, 0xcd, 0xc2, 0xa1, 0xee, 0xb6, 0xe9, 0xb5, 0x4c, - 0x61, 0x75, 0x7b, 0xe3, 0x19, 0x55, 0x38, 0xee, 0xa3, 0x99, 0x40, 0xf0, 0x2c, 0x51, 0xcf, 0xa6, - 0xbe, 0x2a, 0x52, 0xc3, 0x7f, 0xae, 0x11, 0x6a, 0x32, 0xf8, 0x2d, 0xaa, 0x83, 0xfa, 0xdb, 0xed, - 0x5a, 0xaf, 0x3a, 0xdf, 0x5a, 0x5a, 0xb9, 0x82, 0x65, 0xa2, 0x97, 0x62, 0x3d, 0x96, 0x62, 0x77, - 0xca, 0x9a, 0xc2, 0x68, 0xa9, 0xd9, 0x09, 0xcc, 0xe2, 0xe8, 0x1a, 0x3c, 0x87, 0xaa, 0x63, 0xd8, - 0x2d, 0x6d, 0x51, 0x75, 0xc4, 0x4f, 0x51, 0x3d, 0x57, 0x3b, 0x65, 0xe6, 0xbd, 0x70, 0x89, 0xcb, - 0x27, 0x8b, 0x48, 0x4b, 0xee, 0x5a, 0x65, 0xd5, 0xf6, 0x16, 0xf6, 0x8f, 0x1d, 0xeb, 0xe0, 0xd8, - 0xb1, 0x0e, 0x8f, 0x1d, 0x6b, 0xaf, 0x70, 0xec, 0xfd, 0xc2, 0xb1, 0x0f, 0x0a, 0xc7, 0x3e, 0x2c, - 0x1c, 0xfb, 0x77, 0xe1, 0xd8, 0x5f, 0xfe, 0x38, 0xd6, 0x9b, 0x6b, 0x46, 0xe4, 0x6f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x39, 0x00, 0xe7, 0xfa, 0x0e, 0x05, 0x00, 0x00, + // 663 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x4e, 0x14, 0x4d, + 0x14, 0xed, 0x9e, 0x1f, 0xbe, 0x99, 0x9a, 0x6f, 0x14, 0x2b, 0x31, 0x99, 0x4c, 0x62, 0x0f, 0x8e, + 0x1b, 0x12, 0xa4, 0x5a, 0x08, 0x41, 0x82, 0x2b, 0x5a, 0x89, 0xc1, 0x84, 0x98, 0x94, 0xe0, 0x42, + 0x5d, 0x58, 0xd3, 0x73, 0xe9, 0x69, 0xc7, 0xfe, 0x49, 0x55, 0xf5, 0x28, 0x3b, 0x1e, 0xc1, 0xa5, + 0x4b, 0x13, 0x9f, 0xc4, 0x1d, 0x4b, 0x96, 0x2c, 0xcc, 0x44, 0xda, 0x27, 0xf0, 0x0d, 0x4c, 0x55, + 0x17, 0xcc, 0x00, 0x31, 0xc0, 0xae, 0xeb, 0xdc, 0x7b, 0xce, 0x3d, 0xf7, 0x54, 0x17, 0x7a, 0x31, + 0x5c, 0x13, 0x24, 0x4c, 0xdc, 0x61, 0xd6, 0x03, 0x1e, 0x83, 0x04, 0xe1, 0x8e, 0x20, 0xee, 0x27, + 0xdc, 0x35, 0x05, 0x96, 0x86, 0x2e, 0xcb, 0xe4, 0x00, 0x62, 0x19, 0xfa, 0x4c, 0x86, 0x49, 0xec, + 0x8e, 0x96, 0x7a, 0x20, 0xd9, 0x92, 0x1b, 0x40, 0x0c, 0x9c, 0x49, 0xe8, 0x93, 0x94, 0x27, 0x32, + 0xc1, 0xf7, 0x0b, 0x0a, 0x61, 0x69, 0x48, 0xce, 0x53, 0x88, 0xa1, 0xb4, 0x17, 0x83, 0x50, 0x0e, + 0xb2, 0x1e, 0xf1, 0x93, 0xc8, 0x0d, 0x92, 0x20, 0x71, 0x35, 0xb3, 0x97, 0xed, 0xe9, 0x93, 0x3e, + 0xe8, 0xaf, 0x42, 0xb1, 0xbd, 0x32, 0x31, 0x11, 0x31, 0x7f, 0x10, 0xc6, 0xc0, 0xf7, 0xdd, 0x74, + 0x18, 0x28, 0x40, 0xb8, 0x11, 0x48, 0xe6, 0x8e, 0x2e, 0xf9, 0x68, 0xbb, 0xff, 0x62, 0xf1, 0x2c, + 0x96, 0x61, 0x04, 0x97, 0x08, 0xab, 0x57, 0x11, 0x84, 0x3f, 0x80, 0x88, 0x5d, 0xe4, 0x75, 0x1f, + 0x23, 0xb4, 0xf9, 0x59, 0x72, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x50, 0x35, 0x94, 0x10, 0x89, + 0x96, 0x3d, 0x57, 0x9e, 0xaf, 0x7b, 0xf5, 0x7c, 0xdc, 0xa9, 0x6e, 0x29, 0x80, 0x16, 0xf8, 0x7a, + 0xed, 0xeb, 0xb7, 0x8e, 0x75, 0xf0, 0x73, 0xce, 0xea, 0x7e, 0x2f, 0xa1, 0xc6, 0x4e, 0x32, 0x84, + 0x98, 0xc2, 0x28, 0x84, 0x4f, 0xf8, 0x3d, 0xaa, 0xa9, 0x65, 
0xfa, 0x4c, 0xb2, 0x96, 0x3d, 0x67, + 0xcf, 0x37, 0x96, 0x1f, 0x91, 0x49, 0x98, 0x67, 0x9e, 0x48, 0x3a, 0x0c, 0x14, 0x20, 0x88, 0xea, + 0x26, 0xa3, 0x25, 0xf2, 0xb2, 0xf7, 0x01, 0x7c, 0xb9, 0x0d, 0x92, 0x79, 0xf8, 0x70, 0xdc, 0xb1, + 0xf2, 0x71, 0x07, 0x4d, 0x30, 0x7a, 0xa6, 0x8a, 0x77, 0x50, 0x45, 0xa4, 0xe0, 0xb7, 0x4a, 0x5a, + 0x7d, 0x99, 0x5c, 0x79, 0x55, 0x64, 0xca, 0xdf, 0xab, 0x14, 0x7c, 0xef, 0x7f, 0xa3, 0x5f, 0x51, + 0x27, 0xaa, 0xd5, 0xf0, 0x3b, 0x34, 0x23, 0x24, 0x93, 0x99, 0x68, 0x95, 0xb5, 0xee, 0xca, 0x0d, + 0x75, 0x35, 0xd7, 0xbb, 0x65, 0x94, 0x67, 0x8a, 0x33, 0x35, 0x9a, 0x5d, 0x1f, 0xdd, 0xbe, 0x60, + 0x02, 0x3f, 0x40, 0x55, 0xa9, 0x20, 0x9d, 0x52, 0xdd, 0x6b, 0x1a, 0x66, 0xb5, 0xe8, 0x2b, 0x6a, + 0x78, 0x01, 0xd5, 0x59, 0xd6, 0x0f, 0x21, 0xf6, 0x41, 0xb4, 0x4a, 0xfa, 0x32, 0x9a, 0xf9, 0xb8, + 0x53, 0xdf, 0x38, 0x05, 0xe9, 0xa4, 0xde, 0xfd, 0x63, 0xa3, 0x3b, 0x97, 0x2c, 0xe1, 0x27, 0xa8, + 0x39, 0x65, 0x1f, 0xfa, 0x7a, 0x5e, 0xcd, 0xbb, 0x6b, 0xe6, 0x35, 0x37, 0xa6, 0x8b, 0xf4, 0x7c, + 0x2f, 0xde, 0x46, 0x95, 0x4c, 0x00, 0x37, 0x59, 0x2f, 0x5c, 0x23, 0x93, 0x5d, 0x01, 0x7c, 0x2b, + 0xde, 0x4b, 0x26, 0x21, 0x2b, 0x84, 0x6a, 0x19, 0xb5, 0x33, 0x70, 0x9e, 0x70, 0x9d, 0xf1, 0xd4, + 0xce, 0x9b, 0x0a, 0xa4, 0x45, 0xed, 0xfc, 0xce, 0x95, 0x2b, 0x76, 0xfe, 0x51, 0x42, 0xb5, 0xd3, + 0x91, 0xf8, 0x21, 0xaa, 0xa9, 0x31, 0x31, 0x8b, 0xc0, 0xa4, 0x3a, 0x6b, 0x26, 0xe8, 0x1e, 0x85, + 0xd3, 0xb3, 0x0e, 0x7c, 0x0f, 0x95, 0xb3, 0xb0, 0xaf, 0x57, 0xab, 0x7b, 0x0d, 0xd3, 0x58, 0xde, + 0xdd, 0x7a, 0x46, 0x15, 0x8e, 0xbb, 0x68, 0x26, 0xe0, 0x49, 0x96, 0xaa, 0x1f, 0x42, 0x79, 0x40, + 0xea, 0x5a, 0x9f, 0x6b, 0x84, 0x9a, 0x0a, 0x7e, 0x8b, 0xaa, 0xa0, 0x5e, 0x8d, 0xb6, 0xd9, 0x58, + 0x5e, 0xbd, 0x41, 0x3e, 0x44, 0x3f, 0xb7, 0xcd, 0x58, 0xf2, 0xfd, 0xa9, 0x1c, 0x14, 0x46, 0x0b, + 0xcd, 0x76, 0x60, 0x9e, 0xa4, 0xee, 0xc1, 0xb3, 0xa8, 0x3c, 0x84, 0xfd, 0x62, 0x2d, 0xaa, 0x3e, + 0xf1, 0x53, 0x54, 0x1d, 0xa9, 0xd7, 0x6a, 0x2e, 0x67, 0xf1, 0x1a, 0xc3, 0x27, 0x4f, 0x9c, 0x16, + 0xdc, 0xf5, 0xd2, 0x9a, 0xed, 0x2d, 0x1e, 0x9e, 0x38, 0xd6, 0xd1, 0x89, 0x63, 0x1d, 0x9f, 0x38, + 0xd6, 0x41, 0xee, 0xd8, 0x87, 0xb9, 0x63, 0x1f, 0xe5, 0x8e, 0x7d, 0x9c, 0x3b, 0xf6, 0xaf, 0xdc, + 0xb1, 0xbf, 0xfc, 0x76, 0xac, 0x37, 0xff, 0x19, 0x91, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, + 0xd6, 0x32, 0x28, 0x68, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index a057bc591cf39..caf2a6a53af4c 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -57,6 +57,14 @@ message TokenReviewSpec { // Token is the opaque bearer token. // +optional optional string token = 1; + + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + repeated string audiences = 2; } // TokenReviewStatus is the result of the token authentication request. @@ -69,6 +77,18 @@ message TokenReviewStatus { // +optional optional UserInfo user = 2; + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. 
An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + repeated string audiences = 4; + // Error indicates that the token couldn't be checked // +optional optional string error = 3; diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index a90949dc37de0..0b6cba822a268 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -48,6 +48,13 @@ type TokenReviewSpec struct { // Token is the opaque bearer token. // +optional Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } // TokenReviewStatus is the result of the token authentication request. @@ -58,6 +65,17 @@ type TokenReviewStatus struct { // User is the UserInfo associated with the provided token. // +optional User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. 
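The client contract documented in the comment above (set spec.audiences, then confirm a compatible identifier comes back in status.audiences) can be expressed as a small check on the caller's side. The sketch below is illustrative only and is not part of the vendored API types; the package and helper names are hypothetical, and it assumes only the v1beta1 TokenReview fields defined in this file.

package tokenreviewsketch

import (
	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
)

// audienceCompatible reports whether a completed TokenReview satisfies the
// audience rule described above: the token authenticated, and, when the
// caller constrained spec.audiences, at least one of those identifiers is
// echoed back in status.audiences. An empty status.audiences with a
// constrained spec is treated conservatively as incompatible, since it may
// mean the TokenReview server is not audience aware.
func audienceCompatible(tr *authenticationv1beta1.TokenReview) bool {
	if !tr.Status.Authenticated {
		return false
	}
	if len(tr.Spec.Audiences) == 0 {
		// No audience was requested, so there is nothing to cross-check.
		return true
	}
	requested := make(map[string]bool, len(tr.Spec.Audiences))
	for _, a := range tr.Spec.Audiences {
		requested[a] = true
	}
	for _, a := range tr.Status.Audiences {
		if requested[a] {
			return true
		}
	}
	return false
}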
+ // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 968999d1ebe6a..8c9acfb5b2492 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -38,8 +38,9 @@ func (TokenReview) SwaggerDoc() map[string]string { } var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", + "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", } func (TokenReviewSpec) SwaggerDoc() map[string]string { @@ -50,6 +51,7 @@ var map_TokenReviewStatus = map[string]string{ "": "TokenReviewStatus is the result of the token authentication request.", "authenticated": "Authenticated indicates that the token was associated with a known user.", "user": "User is the UserInfo associated with the provided token.", + "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", "error": "Error indicates that the token couldn't be checked", } diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 3a5f6d5a93677..a5d82a8100a57 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -49,7 +49,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } @@ -75,6 +75,11 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -92,6 +97,11 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index c06b798df8c19..c63ac28cfa79f 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=authorization.k8s.io + package v1 // import "k8s.io/api/authorization/v1" diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index e9145af0261d5..fc6a25f629997 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -795,24 +794,6 @@ func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2888,51 +2869,14 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < 
postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2942,46 +2886,85 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go index ea4f802e28928..324f293a17cd7 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=authorization.k8s.io + package v1beta1 // import "k8s.io/api/authorization/v1beta1" diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 75ee6cf918fc2..7cce98eb1952d 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. 
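The rewritten map handling in this hunk reflects the proto3 map wire format: a map<string, ExtraValue> field travels as repeated map-entry messages, each carrying the key in field 1 and the value in field 2, in no guaranteed order. The sketch below illustrates that layout from the encoding side; it is not the generated code, the package and helper names are hypothetical, and the map field number is a placeholder supplied by the caller.

package mapentrysketch

// appendVarint writes v with the same 7-bits-per-byte continuation scheme
// used by encodeVarintGenerated.
func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// appendLengthDelimited writes a (fieldNumber, wire type 2) tag followed by a
// length-prefixed payload.
func appendLengthDelimited(b []byte, fieldNumber int, payload []byte) []byte {
	b = appendVarint(b, uint64(fieldNumber)<<3|2)
	b = appendVarint(b, uint64(len(payload)))
	return append(b, payload...)
}

// appendMapEntry encodes one key/value pair the way the unmarshaler above
// expects to find it: a nested entry message whose field 1 is the string key
// and whose field 2 is the already-marshaled value message (here, an
// ExtraValue).
func appendMapEntry(b []byte, mapFieldNumber int, key string, valueMsg []byte) []byte {
	var entry []byte
	entry = appendLengthDelimited(entry, 1, []byte(key))
	entry = appendLengthDelimited(entry, 2, valueMsg)
	return appendLengthDelimited(b, mapFieldNumber, entry)
}

Decoding that shape by field number, rather than assuming the key always precedes the value, is exactly why the replacement loop above switches on fieldNum and skips anything else.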
+// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -795,24 +794,6 @@ func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2888,51 +2869,14 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2942,46 +2886,85 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 47a46a5574ee2..950e93340d685 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -996,24 +995,6 @@ func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index a6e874f3da940..72ac97271218d 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -196,8 +196,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string { } var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } @@ -207,8 +207,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { } var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index bee94129d8ef6..b6a5f35629b92 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto -// DO NOT EDIT! /* Package v2beta1 is a generated protocol buffer package. 
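These regenerated files repeatedly drop the apparently unused encodeFixed64Generated and encodeFixed32Generated helpers while keeping encodeVarintGenerated, as the surrounding hunks show. The difference is purely in the wire encoding: the fixed helpers always emitted 8 or 4 little-endian bytes, whereas the varint helper emits 7 bits per byte with a continuation bit, so small values stay short. The sketch below mirrors both styles for comparison; it is not part of the vendored code, and the names are local to the example.

package wiresketch

// encodeVarint mirrors the retained encodeVarintGenerated helper: 7 bits per
// byte, high bit set on every byte except the last, least significant group
// first.
func encodeVarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

// encodeFixed64 mirrors the removed encodeFixed64Generated helper: always
// eight little-endian bytes, regardless of how small the value is.
func encodeFixed64(dst []byte, offset int, v uint64) int {
	for i := uint(0); i < 8; i++ {
		dst[offset+int(i)] = uint8(v >> (8 * i))
	}
	return offset + 8
}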
@@ -916,24 +915,6 @@ func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 411b817d04ec9..589408ace0cea 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -197,8 +197,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string { } var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } @@ -208,8 +208,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { } var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index be752a1402518..816fea9d532a3 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto -// DO NOT EDIT! /* Package v2beta2 is a generated protocol buffer package. @@ -966,24 +965,6 @@ func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 097a6ff28c57c..3aa32b5784616 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
@@ -343,24 +342,6 @@ func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index ece2204f932f6..36342a3af1a3d 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -336,24 +335,6 @@ func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 6ab41ebbcb3ae..4d9ba5c000453 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto -// DO NOT EDIT! /* Package v2alpha1 is a generated protocol buffer package. 
@@ -336,24 +335,6 @@ func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go index fb23aadb0e964..8473b640fa552 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=certificates.k8s.io + package v1beta1 // import "k8s.io/api/certificates/v1beta1" diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index eda1599005a39..19bf225fa31a5 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -378,24 +377,6 @@ func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1221,51 +1202,14 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1275,46 +1219,85 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return 
ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go index fecb513fcfa51..bc95fd17d4deb 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/doc.go +++ b/vendor/k8s.io/api/coordination/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=coordination.k8s.io + package v1beta1 // import "k8s.io/api/coordination/v1beta1" diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index 6c2dbd91f9a97..aa57e9dd64421 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -196,24 +195,6 @@ func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 16a0cfced13e2..2c72ec2df2d6e 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -78,4 +78,23 @@ const ( // // Not all cloud providers support this annotation, though AWS & GCE do. 
AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that + // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') + // of the last change, of some Pod or Service object, that triggered the endpoints object change. + // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints + // controller at T1, and the Endpoints object was changed at T2, the + // EndpointsLastChangeTriggerTime would be set to T0. + // + // The "endpoints change trigger" here means any Pod or Service change that resulted in the + // Endpoints object change. + // + // Given the definition of the "endpoints change trigger", please note that this annotation will + // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the + // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's + // already set). + // + // This annotation will be used to compute the in-cluster network programming latency SLI, see + // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md + EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" ) diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index b569ea84de1c6..05cc6d62844c9 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -82,6 +81,7 @@ limitations under the License. 
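The EndpointsLastChangeTriggerTime annotation added above stores the trigger timestamp as an RFC 3339 string, so a consumer computing the network programming latency SLI would parse it and subtract it from its own observation time. The sketch below is a hypothetical consumer, not vendored code; it assumes only the constant and format documented in that comment.

package latencysketch

import (
	"time"

	corev1 "k8s.io/api/core/v1"
)

// triggerLatency returns how long after the triggering Pod or Service change
// the caller observed the Endpoints object. The boolean is false when the
// annotation is missing (the change was not Pod/Service triggered) or when
// the value is not valid RFC 3339.
func triggerLatency(annotations map[string]string, observedAt time.Time) (time.Duration, bool) {
	raw, ok := annotations[corev1.EndpointsLastChangeTriggerTime]
	if !ok {
		return 0, false
	}
	trigger, err := time.Parse(time.RFC3339, raw)
	if err != nil {
		return 0, false
	}
	return observedAt.Sub(trigger), true
}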
FlockerVolumeSource GCEPersistentDiskVolumeSource GitRepoVolumeSource + GlusterfsPersistentVolumeSource GlusterfsVolumeSource HTTPGetAction HTTPHeader @@ -499,604 +499,610 @@ func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSou func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } +func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} +func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{57} +} + func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } func (m *Handler) Reset() { *m = Handler{} } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{63} + return fileDescriptorGenerated, []int{64} } func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{67} } func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*NamespaceList) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{79} } func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} -func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (*NodeResources) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{91} } func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{92} + return fileDescriptorGenerated, []int{93} } func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{101} + return fileDescriptorGenerated, []int{102} } func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return 
fileDescriptorGenerated, []int{102} + return fileDescriptorGenerated, []int{103} } func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} + return fileDescriptorGenerated, []int{104} } func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{104} + return fileDescriptorGenerated, []int{105} } func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{105} + return fileDescriptorGenerated, []int{106} } func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{107} + return fileDescriptorGenerated, []int{108} } func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{109} + return fileDescriptorGenerated, []int{110} } func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{110} + return fileDescriptorGenerated, []int{111} } func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{114} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} -func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} -func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} -func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} -func 
(*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{136} + return fileDescriptorGenerated, []int{137} } func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} -func 
(*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{140} + return fileDescriptorGenerated, []int{141} } func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{144} + return fileDescriptorGenerated, []int{145} } func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{145} + return fileDescriptorGenerated, []int{146} } func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{146} + return fileDescriptorGenerated, []int{147} } func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{147} + return fileDescriptorGenerated, []int{148} } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{150} } func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{155} + return fileDescriptorGenerated, []int{156} } func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} -func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } +func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{158} + return fileDescriptorGenerated, []int{159} } func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} -func 
(*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{171} + return fileDescriptorGenerated, []int{172} } func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } func (m *ServicePort) Reset() { 
*m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{178} + return fileDescriptorGenerated, []int{179} } func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{184} + return fileDescriptorGenerated, 
[]int{185} } func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} -func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{186} } func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{186} + return fileDescriptorGenerated, []int{187} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{188} } func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{188} } +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} -func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{193} } func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{193} + return fileDescriptorGenerated, []int{194} } func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{194} + return fileDescriptorGenerated, []int{195} } func init() { @@ -1157,6 +1163,7 @@ func init() { proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") 
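The hand-rolled MarshalTo for the new GlusterfsPersistentVolumeSource just below, and the rewritten map<string,string> Unmarshal loops further down in this diff (CSIPersistentVolumeSource.VolumeAttributes, ConfigMap.Data and BinaryData, the FlexVolumeSource options, and so on), all operate directly on the proto wire format: each field starts with a varint tag whose low three bits are the wire type and whose remaining bits are the field number. The following is a simplified, self-contained sketch of that map-entry decoding pattern, not the generated code itself, which additionally skips unknown fields of any wire type via skipGenerated:

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeStringMapEntry decodes one protobuf map entry message (its bytes
// already length-delimited by the caller) into its key and value strings.
func decodeStringMapEntry(entry []byte) (key, value string, err error) {
	i := 0
	for i < len(entry) {
		// Read the tag varint, then split it into field number and wire type.
		tag, n := binary.Uvarint(entry[i:])
		if n <= 0 {
			return "", "", fmt.Errorf("bad tag varint")
		}
		i += n
		fieldNum, wireType := tag>>3, tag&0x7
		if wireType != 2 { // both key and value are length-delimited strings
			return "", "", fmt.Errorf("unexpected wire type %d for field %d", wireType, fieldNum)
		}
		// Read the length prefix, then the string payload.
		l, m := binary.Uvarint(entry[i:])
		if m <= 0 || i+m+int(l) > len(entry) {
			return "", "", fmt.Errorf("bad length prefix")
		}
		s := string(entry[i+m : i+m+int(l)])
		i += m + int(l)
		switch fieldNum {
		case 1:
			key = s
		case 2:
			value = s
		default:
			// Unknown length-delimited field: its bytes were consumed above.
		}
	}
	return key, value, nil
}

func main() {
	// One map entry on the wire: field 1 (key) = "foo", field 2 (value) = "bar".
	entry := []byte{0x0a, 0x03, 'f', 'o', 'o', 0x12, 0x03, 'b', 'a', 'r'}
	fmt.Println(decodeStringMapEntry(entry)) // foo bar <nil>
}

The literal tag bytes in the generated marshal code below follow the same rule: 0x0a is field 1 / wire type 2 (EndpointsName), 0x12 is field 2 / wire type 2 (Path), 0x18 is field 3 / varint (ReadOnly), and 0x22 is field 4 / wire type 2 (EndpointsNamespace, emitted only when non-nil).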
proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") @@ -3949,6 +3956,46 @@ func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i += copy(dAtA[i:], m.EndpointsName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.EndpointsNamespace != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) + i += copy(dAtA[i:], *m.EndpointsNamespace) + } + return i, nil +} + func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7684,6 +7731,18 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) i += copy(dAtA[i:], *m.RuntimeClassName) } + if m.EnableServiceLinks != nil { + dAtA[i] = 0xf0 + i++ + dAtA[i] = 0x1 + i++ + if *m.EnableServiceLinks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -10946,24 +11005,6 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -11953,6 +11994,21 @@ func (m *GitRepoVolumeSource) Size() (n int) { return n } +func (m *GlusterfsPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.EndpointsNamespace != nil { + l = len(*m.EndpointsNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *GlusterfsVolumeSource) Size() (n int) { var l int _ = l @@ -13315,6 +13371,9 @@ func (m *PodSpec) Size() (n int) { l = len(*m.RuntimeClassName) n += 2 + l + sovGenerated(uint64(l)) } + if m.EnableServiceLinks != nil { + n += 3 + } return n } @@ -15295,6 +15354,19 @@ func (this *GitRepoVolumeSource) String() string { }, "") return s } +func (this *GlusterfsPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `EndpointsNamespace:` + 
valueToStringGenerated(this.EndpointsNamespace) + `,`, + `}`, + }, "") + return s +} func (this *GlusterfsVolumeSource) String() string { if this == nil { return "nil" @@ -16010,7 +16082,7 @@ func (this *PersistentVolumeSource) String() string { `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`, `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, @@ -16325,6 +16397,7 @@ func (this *PodSpec) String() string { `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`, `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, + `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, `}`, }, "") return s @@ -18557,51 +18630,14 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.VolumeAttributes == nil { m.VolumeAttributes = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18611,41 +18647,80 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var 
stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.VolumeAttributes[mapkey] = mapvalue - } else { - var mapvalue string - m.VolumeAttributes[mapkey] = mapvalue } + m.VolumeAttributes[mapkey] = mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -20178,51 +20253,14 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Data == nil { m.Data = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20232,41 +20270,80 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Data[mapkey] = mapvalue - } else { - var mapvalue string - m.Data[mapkey] = mapvalue } + m.Data[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -20294,51 +20371,14 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.BinaryData == nil { m.BinaryData = make(map[string][]byte) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + 
mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20348,42 +20388,81 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - m.BinaryData[mapkey] = mapvalue - } else { - var mapvalue []byte - m.BinaryData[mapkey] = mapvalue } + m.BinaryData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -25913,51 +25992,14 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - 
mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Options == nil { m.Options = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25967,41 +26009,80 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue } + m.Options[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -26190,51 +26271,14 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Options == nil { m.Options = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26244,41 +26288,80 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue } + m.Options[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -26693,7 +26776,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { +func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26716,10 +26799,10 @@ func (m *GlusterfsVolumeSource) 
Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -26800,118 +26883,276 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } } m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.EndpointsNamespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointsName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28605,51 +28846,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Max == nil { m.Max = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28659,46 +28863,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Max[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Max[ResourceName(mapkey)] = mapvalue } + m.Max[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -28726,51 +28969,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Min == nil { m.Min = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28780,46 +28986,85 @@ func (m *LimitRangeItem) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Min[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Min[ResourceName(mapkey)] = mapvalue } + m.Min[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -28847,51 +29092,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := 
ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Default == nil { m.Default = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28901,46 +29109,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Default[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Default[ResourceName(mapkey)] = mapvalue } + m.Default[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 5: if wireType != 2 { @@ -28968,51 +29215,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.DefaultRequest == nil { m.DefaultRequest = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29022,46 +29232,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.DefaultRequest[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.DefaultRequest[ResourceName(mapkey)] = mapvalue } + m.DefaultRequest[ResourceName(mapkey)] = 
*mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -29089,51 +29338,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.MaxLimitRequestRatio == nil { m.MaxLimitRequestRatio = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29143,46 +29355,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + 
mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.MaxLimitRequestRatio[ResourceName(mapkey)] = mapvalue } + m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -31596,51 +31847,14 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31650,46 +31864,85 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + 
mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -32318,51 +32571,14 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32372,46 +32588,85 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var 
mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -32439,51 +32694,14 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Allocatable == nil { m.Allocatable = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32493,46 +32711,85 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Allocatable[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Allocatable[ResourceName(mapkey)] = mapvalue } + m.Allocatable[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -34468,42 +34725,13 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = PersistentVolumeClaimPhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) + m.Phase = PersistentVolumeClaimPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34513,34 +34741,26 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { 
break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) } - var stringLenmapkey uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34550,26 +34770,26 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34579,46 +34799,85 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != 
nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -35037,7 +35296,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Glusterfs == nil { - m.Glusterfs = &GlusterfsVolumeSource{} + m.Glusterfs = &GlusterfsPersistentVolumeSource{} } if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -35713,51 +35972,14 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35767,46 +35989,85 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := 
dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -38810,51 +39071,14 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.NodeSelector == nil { m.NodeSelector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -38864,41 +39088,80 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.NodeSelector[mapkey] = mapvalue - } else { - var mapvalue string - m.NodeSelector[mapkey] = mapvalue } + m.NodeSelector[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 2 { @@ -39509,6 +39772,27 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.RuntimeClassName = &s iNdEx = postIndex + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableServiceLinks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.EnableServiceLinks = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -42503,51 +42787,14 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + 
intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -42557,41 +42804,80 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -43286,51 +43572,14 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Hard == nil { m.Hard = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43340,46 +43589,85 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Hard[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Hard[ResourceName(mapkey)] = mapvalue } + m.Hard[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -43519,51 +43807,14 @@ func (m *ResourceQuotaStatus) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Hard == nil { m.Hard = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43573,46 +43824,85 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - 
mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Hard[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Hard[ResourceName(mapkey)] = mapvalue } + m.Hard[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -43640,51 +43930,14 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Used == nil { m.Used = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43694,46 +43947,85 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else 
{ + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Used[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Used[ResourceName(mapkey)] = mapvalue } + m.Used[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -43811,51 +44103,14 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Limits == nil { m.Limits = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43865,46 +44120,85 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Limits[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Limits[ResourceName(mapkey)] = mapvalue } + m.Limits[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -43932,51 +44226,14 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Requests == nil { m.Requests = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43986,46 +44243,85 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := 
iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Requests[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Requests[ResourceName(mapkey)] = mapvalue } + m.Requests[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -45169,51 +45465,162 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Data == nil { - m.Data = make(map[string][]byte) + if m.Data == nil { + m.Data = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SecretType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StringData == nil { + m.StringData = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45223,187 +45630,80 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if 
intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - m.Data[mapkey] = mapvalue - } else { - var mapvalue []byte - m.Data[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = SecretType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.StringData == nil { - m.StringData = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if skippy < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF + iNdEx += skippy } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.StringData[mapkey] = mapvalue - } else { - var mapvalue string - m.StringData[mapkey] = mapvalue } + m.StringData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -47503,51 +47803,14 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47557,41 +47820,80 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -51346,804 +51648,808 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 12780 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6b, 0x6c, 0x24, 0x47, - 0x7a, 0xd8, 0xf5, 0xcc, 0x90, 0x9c, 0xf9, 0xf8, 0xae, 0x7d, 0x88, 0x4b, 0x69, 0x77, 0x56, 0xad, - 0xbb, 0xd5, 0xea, 0x24, 0x91, 0xa7, 0x95, 0x74, 0x92, 0x4f, 0x3a, 0xd9, 0x24, 0x87, 0xdc, 0x1d, - 0xed, 0x92, 0x3b, 0xaa, 0xe1, 0xee, 0xde, 0xc9, 0xba, 0xf3, 0x35, 0x67, 0x8a, 0x64, 0x8b, 0xc3, - 0xee, 0x51, 0x77, 0x0f, 0x77, 0xa9, 0xd8, 0x40, 0x72, 0x8e, 0x9d, 0x5c, 0x6c, 0x04, 0x87, 0xd8, - 0xc8, 0xc3, 0x36, 0x1c, 0xc0, 0x71, 0x60, 0x3b, 0x4e, 0x82, 0x38, 0x76, 0x6c, 0xc7, 0x67, 0x27, - 0x8e, 0x9d, 0x1f, 0x0e, 0x10, 0x5c, 0x9c, 0x00, 0xc1, 0x19, 0x30, 0xc2, 0xd8, 0x74, 0x1e, 0xf0, - 0x8f, 0x3c, 0x10, 0xe7, 0x47, 0xcc, 0x18, 0x71, 0x50, 0xcf, 0xae, 0xea, 0xe9, 0x9e, 0x19, 0xae, - 0xb8, 0x94, 0x7c, 0xb8, 0x7f, 0x33, 0xf5, 0x7d, 0xf5, 0x55, 0x75, 0x3d, 0xbf, 0xef, 0xab, 0xef, - 0x01, 0xaf, 0xed, 0xbc, 0x1a, 0xce, 0xb9, 0xfe, 0xfc, 0x4e, 0x67, 0x83, 0x04, 0x1e, 0x89, 0x48, - 0x38, 0xbf, 0x47, 0xbc, 0xa6, 0x1f, 0xcc, 0x0b, 0x80, 0xd3, 0x76, 0xe7, 0x1b, 0x7e, 0x40, 0xe6, - 0xf7, 0x5e, 0x98, 0xdf, 0x22, 0x1e, 0x09, 0x9c, 0x88, 0x34, 0xe7, 0xda, 0x81, 0x1f, 0xf9, 0x08, - 0x71, 0x9c, 0x39, 0xa7, 0xed, 0xce, 0x51, 0x9c, 0xb9, 0xbd, 0x17, 0x66, 0x9f, 0xdf, 0x72, 0xa3, - 0xed, 0xce, 0xc6, 0x5c, 0xc3, 0xdf, 0x9d, 0xdf, 0xf2, 0xb7, 0xfc, 0x79, 0x86, 0xba, 0xd1, 0xd9, - 0x64, 0xff, 0xd8, 0x1f, 0xf6, 0x8b, 0x93, 0x98, 0x7d, 0x29, 0x6e, 0x66, 0xd7, 0x69, 0x6c, 0xbb, - 0x1e, 0x09, 0xf6, 0xe7, 0xdb, 0x3b, 0x5b, 0xac, 0xdd, 0x80, 0x84, 0x7e, 0x27, 0x68, 0x90, 0x64, - 0xc3, 0x3d, 0x6b, 0x85, 0xf3, 0xbb, 0x24, 0x72, 0x52, 0xba, 0x3b, 0x3b, 0x9f, 0x55, 0x2b, 0xe8, - 0x78, 0x91, 0xbb, 0xdb, 0xdd, 0xcc, 0xa7, 0xfb, 0x55, 0x08, 0x1b, 0xdb, 0x64, 0xd7, 0xe9, 0xaa, - 0xf7, 0x62, 0x56, 0xbd, 0x4e, 0xe4, 0xb6, 0xe6, 0x5d, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, - 0x86, 
0x05, 0x97, 0x17, 0xee, 0xd5, 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0xc5, 0x96, 0xdf, 0xd8, - 0xa9, 0x47, 0x7e, 0x40, 0xee, 0xfa, 0xad, 0xce, 0x2e, 0xa9, 0xb3, 0x81, 0x40, 0xcf, 0x41, 0x71, - 0x8f, 0xfd, 0xaf, 0x56, 0x66, 0xac, 0xcb, 0xd6, 0xd5, 0xd2, 0xe2, 0xd4, 0x6f, 0x1d, 0x94, 0x3f, - 0x76, 0x78, 0x50, 0x2e, 0xde, 0x15, 0xe5, 0x58, 0x61, 0xa0, 0x2b, 0x30, 0xbc, 0x19, 0xae, 0xef, - 0xb7, 0xc9, 0x4c, 0x8e, 0xe1, 0x4e, 0x08, 0xdc, 0xe1, 0x95, 0x3a, 0x2d, 0xc5, 0x02, 0x8a, 0xe6, - 0xa1, 0xd4, 0x76, 0x82, 0xc8, 0x8d, 0x5c, 0xdf, 0x9b, 0xc9, 0x5f, 0xb6, 0xae, 0x0e, 0x2d, 0x4e, - 0x0b, 0xd4, 0x52, 0x4d, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x79, 0xdb, 0x6b, 0xed, - 0xcf, 0x14, 0x2e, 0x5b, 0x57, 0x8b, 0x71, 0x37, 0xb0, 0x28, 0xc7, 0x0a, 0xc3, 0xfe, 0x91, 0x1c, - 0x14, 0x17, 0x36, 0x37, 0x5d, 0xcf, 0x8d, 0xf6, 0xd1, 0x5d, 0x18, 0xf3, 0xfc, 0x26, 0x91, 0xff, - 0xd9, 0x57, 0x8c, 0x5e, 0xbb, 0x3c, 0xd7, 0xbd, 0x94, 0xe6, 0xd6, 0x34, 0xbc, 0xc5, 0xa9, 0xc3, - 0x83, 0xf2, 0x98, 0x5e, 0x82, 0x0d, 0x3a, 0x08, 0xc3, 0x68, 0xdb, 0x6f, 0x2a, 0xb2, 0x39, 0x46, - 0xb6, 0x9c, 0x46, 0xb6, 0x16, 0xa3, 0x2d, 0x4e, 0x1e, 0x1e, 0x94, 0x47, 0xb5, 0x02, 0xac, 0x13, - 0x41, 0x1b, 0x30, 0x49, 0xff, 0x7a, 0x91, 0xab, 0xe8, 0xe6, 0x19, 0xdd, 0xa7, 0xb2, 0xe8, 0x6a, - 0xa8, 0x8b, 0x67, 0x0e, 0x0f, 0xca, 0x93, 0x89, 0x42, 0x9c, 0x24, 0x68, 0xbf, 0x0f, 0x13, 0x0b, - 0x51, 0xe4, 0x34, 0xb6, 0x49, 0x93, 0xcf, 0x20, 0x7a, 0x09, 0x0a, 0x9e, 0xb3, 0x4b, 0xc4, 0xfc, - 0x5e, 0x16, 0x03, 0x5b, 0x58, 0x73, 0x76, 0xc9, 0xd1, 0x41, 0x79, 0xea, 0x8e, 0xe7, 0xbe, 0xd7, - 0x11, 0xab, 0x82, 0x96, 0x61, 0x86, 0x8d, 0xae, 0x01, 0x34, 0xc9, 0x9e, 0xdb, 0x20, 0x35, 0x27, - 0xda, 0x16, 0xf3, 0x8d, 0x44, 0x5d, 0xa8, 0x28, 0x08, 0xd6, 0xb0, 0xec, 0x07, 0x50, 0x5a, 0xd8, - 0xf3, 0xdd, 0x66, 0xcd, 0x6f, 0x86, 0x68, 0x07, 0x26, 0xdb, 0x01, 0xd9, 0x24, 0x81, 0x2a, 0x9a, - 0xb1, 0x2e, 0xe7, 0xaf, 0x8e, 0x5e, 0xbb, 0x9a, 0xfa, 0xb1, 0x26, 0xea, 0xb2, 0x17, 0x05, 0xfb, - 0x8b, 0x8f, 0x89, 0xf6, 0x26, 0x13, 0x50, 0x9c, 0xa4, 0x6c, 0xff, 0xcb, 0x1c, 0x9c, 0x5b, 0x78, - 0xbf, 0x13, 0x90, 0x8a, 0x1b, 0xee, 0x24, 0x57, 0x78, 0xd3, 0x0d, 0x77, 0xd6, 0xe2, 0x11, 0x50, - 0x4b, 0xab, 0x22, 0xca, 0xb1, 0xc2, 0x40, 0xcf, 0xc3, 0x08, 0xfd, 0x7d, 0x07, 0x57, 0xc5, 0x27, - 0x9f, 0x11, 0xc8, 0xa3, 0x15, 0x27, 0x72, 0x2a, 0x1c, 0x84, 0x25, 0x0e, 0x5a, 0x85, 0xd1, 0x06, - 0xdb, 0x90, 0x5b, 0xab, 0x7e, 0x93, 0xb0, 0xc9, 0x2c, 0x2d, 0x3e, 0x4b, 0xd1, 0x97, 0xe2, 0xe2, - 0xa3, 0x83, 0xf2, 0x0c, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0x7f, 0x15, - 0x18, 0x25, 0x48, 0xd9, 0x5b, 0x57, 0xb5, 0xad, 0x32, 0xc4, 0xb6, 0xca, 0x58, 0xfa, 0x36, 0x41, - 0x2f, 0x40, 0x61, 0xc7, 0xf5, 0x9a, 0x33, 0xc3, 0x8c, 0xd6, 0x45, 0x3a, 0xe7, 0x37, 0x5d, 0xaf, - 0x79, 0x74, 0x50, 0x9e, 0x36, 0xba, 0x43, 0x0b, 0x31, 0x43, 0xb5, 0xff, 0xc8, 0x82, 0x32, 0x83, - 0xad, 0xb8, 0x2d, 0x52, 0x23, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, 0xaf, 0x01, 0x84, - 0xa4, 0x11, 0x90, 0x48, 0x1b, 0x52, 0xb5, 0x30, 0xea, 0x0a, 0x82, 0x35, 0x2c, 0x7a, 0x20, 0x84, - 0xdb, 0x4e, 0xc0, 0xd6, 0x97, 0x18, 0x58, 0x75, 0x20, 0xd4, 0x25, 0x00, 0xc7, 0x38, 0xc6, 0x81, - 0x90, 0xef, 0x77, 0x20, 0xa0, 0xcf, 0xc2, 0x64, 0xdc, 0x58, 0xd8, 0x76, 0x1a, 0x72, 0x00, 0xd9, - 0x96, 0xa9, 0x9b, 0x20, 0x9c, 0xc4, 0xb5, 0xff, 0x9e, 0x25, 0x16, 0x0f, 0xfd, 0xea, 0x8f, 0xf8, - 0xb7, 0xda, 0xbf, 0x6c, 0xc1, 0xc8, 0xa2, 0xeb, 0x35, 0x5d, 0x6f, 0x0b, 0x7d, 0x09, 0x8a, 0xf4, - 0x6e, 0x6a, 0x3a, 0x91, 0x23, 0xce, 0xbd, 0x4f, 0x69, 0x7b, 0x4b, 0x5d, 0x15, 0x73, 0xed, 0x9d, - 0x2d, 0x5a, 0x10, 0xce, 0x51, 
0x6c, 0xba, 0xdb, 0x6e, 0x6f, 0xbc, 0x4b, 0x1a, 0xd1, 0x2a, 0x89, - 0x9c, 0xf8, 0x73, 0xe2, 0x32, 0xac, 0xa8, 0xa2, 0x9b, 0x30, 0x1c, 0x39, 0xc1, 0x16, 0x89, 0xc4, - 0x01, 0x98, 0x7a, 0x50, 0xf1, 0x9a, 0x98, 0xee, 0x48, 0xe2, 0x35, 0x48, 0x7c, 0x2d, 0xac, 0xb3, - 0xaa, 0x58, 0x90, 0xb0, 0xff, 0xca, 0x30, 0x5c, 0x58, 0xaa, 0x57, 0x33, 0xd6, 0xd5, 0x15, 0x18, - 0x6e, 0x06, 0xee, 0x1e, 0x09, 0xc4, 0x38, 0x2b, 0x2a, 0x15, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x0a, - 0x63, 0xfc, 0x42, 0xba, 0xe1, 0x78, 0xcd, 0x96, 0x1c, 0xe2, 0xb3, 0x02, 0x7b, 0xec, 0xae, 0x06, - 0xc3, 0x06, 0xe6, 0x31, 0x17, 0xd5, 0x95, 0xc4, 0x66, 0xcc, 0xba, 0xec, 0xbe, 0x62, 0xc1, 0x14, - 0x6f, 0x66, 0x21, 0x8a, 0x02, 0x77, 0xa3, 0x13, 0x91, 0x70, 0x66, 0x88, 0x9d, 0x74, 0x4b, 0x69, - 0xa3, 0x95, 0x39, 0x02, 0x73, 0x77, 0x13, 0x54, 0xf8, 0x21, 0x38, 0x23, 0xda, 0x9d, 0x4a, 0x82, - 0x71, 0x57, 0xb3, 0xe8, 0x7b, 0x2d, 0x98, 0x6d, 0xf8, 0x5e, 0x14, 0xf8, 0xad, 0x16, 0x09, 0x6a, - 0x9d, 0x8d, 0x96, 0x1b, 0x6e, 0xf3, 0x75, 0x8a, 0xc9, 0x26, 0x3b, 0x09, 0x32, 0xe6, 0x50, 0x21, - 0x89, 0x39, 0xbc, 0x74, 0x78, 0x50, 0x9e, 0x5d, 0xca, 0x24, 0x85, 0x7b, 0x34, 0x83, 0x76, 0x00, - 0xd1, 0xab, 0xb4, 0x1e, 0x39, 0x5b, 0x24, 0x6e, 0x7c, 0x64, 0xf0, 0xc6, 0xcf, 0x1f, 0x1e, 0x94, - 0xd1, 0x5a, 0x17, 0x09, 0x9c, 0x42, 0x16, 0xbd, 0x07, 0x67, 0x69, 0x69, 0xd7, 0xb7, 0x16, 0x07, - 0x6f, 0x6e, 0xe6, 0xf0, 0xa0, 0x7c, 0x76, 0x2d, 0x85, 0x08, 0x4e, 0x25, 0x3d, 0xbb, 0x04, 0xe7, - 0x52, 0xa7, 0x0a, 0x4d, 0x41, 0x7e, 0x87, 0x70, 0x16, 0xa4, 0x84, 0xe9, 0x4f, 0x74, 0x16, 0x86, - 0xf6, 0x9c, 0x56, 0x47, 0xac, 0x52, 0xcc, 0xff, 0x7c, 0x26, 0xf7, 0xaa, 0x65, 0x37, 0x60, 0x6c, - 0xc9, 0x69, 0x3b, 0x1b, 0x6e, 0xcb, 0x8d, 0x5c, 0x12, 0xa2, 0xa7, 0x21, 0xef, 0x34, 0x9b, 0xec, - 0x8a, 0x2c, 0x2d, 0x9e, 0x3b, 0x3c, 0x28, 0xe7, 0x17, 0x9a, 0xf4, 0xac, 0x06, 0x85, 0xb5, 0x8f, - 0x29, 0x06, 0xfa, 0x24, 0x14, 0x9a, 0x81, 0xdf, 0x9e, 0xc9, 0x31, 0x4c, 0x3a, 0x54, 0x85, 0x4a, - 0xe0, 0xb7, 0x13, 0xa8, 0x0c, 0xc7, 0xfe, 0xf5, 0x1c, 0x3c, 0xb1, 0x44, 0xda, 0xdb, 0x2b, 0xf5, - 0x8c, 0x4d, 0x77, 0x15, 0x8a, 0xbb, 0xbe, 0xe7, 0x46, 0x7e, 0x10, 0x8a, 0xa6, 0xd9, 0x6d, 0xb2, - 0x2a, 0xca, 0xb0, 0x82, 0xa2, 0xcb, 0x50, 0x68, 0xc7, 0x9c, 0xc0, 0x98, 0xe4, 0x22, 0x18, 0x0f, - 0xc0, 0x20, 0x14, 0xa3, 0x13, 0x92, 0x40, 0xdc, 0x82, 0x0a, 0xe3, 0x4e, 0x48, 0x02, 0xcc, 0x20, - 0xf1, 0x71, 0x4a, 0x0f, 0x5a, 0xb1, 0xad, 0x12, 0xc7, 0x29, 0x85, 0x60, 0x0d, 0x0b, 0xd5, 0xa0, - 0x14, 0xaa, 0x49, 0x1d, 0x1a, 0x7c, 0x52, 0xc7, 0xd9, 0x79, 0xab, 0x66, 0x32, 0x26, 0x62, 0x1c, - 0x03, 0xc3, 0x7d, 0xcf, 0xdb, 0xaf, 0xe5, 0x00, 0xf1, 0x21, 0xfc, 0x33, 0x36, 0x70, 0x77, 0xba, - 0x07, 0x2e, 0x95, 0xf3, 0xba, 0xe5, 0x37, 0x9c, 0x56, 0xf2, 0x08, 0x3f, 0xa9, 0xd1, 0xfb, 0xdf, - 0x16, 0x3c, 0xb1, 0xe4, 0x7a, 0x4d, 0x12, 0x64, 0x2c, 0xc0, 0x47, 0x23, 0x80, 0x1c, 0xef, 0xa4, - 0x37, 0x96, 0x58, 0xe1, 0x04, 0x96, 0x98, 0xfd, 0x3f, 0x2c, 0x40, 0xfc, 0xb3, 0x3f, 0x72, 0x1f, - 0x7b, 0xa7, 0xfb, 0x63, 0x4f, 0x60, 0x59, 0xd8, 0xb7, 0x60, 0x62, 0xa9, 0xe5, 0x12, 0x2f, 0xaa, - 0xd6, 0x96, 0x7c, 0x6f, 0xd3, 0xdd, 0x42, 0x9f, 0x81, 0x09, 0x2a, 0xd3, 0xfa, 0x9d, 0xa8, 0x4e, - 0x1a, 0xbe, 0xc7, 0xd8, 0x7f, 0x2a, 0x09, 0xa2, 0xc3, 0x83, 0xf2, 0xc4, 0xba, 0x01, 0xc1, 0x09, - 0x4c, 0xfb, 0x77, 0xe9, 0xf8, 0xf9, 0xbb, 0x6d, 0xdf, 0x23, 0x5e, 0xb4, 0xe4, 0x7b, 0x4d, 0x2e, - 0x26, 0x7e, 0x06, 0x0a, 0x11, 0x1d, 0x0f, 0x3e, 0x76, 0x57, 0xe4, 0x46, 0xa1, 0xa3, 0x70, 0x74, - 0x50, 0x3e, 0xdf, 0x5d, 0x83, 0x8d, 0x13, 0xab, 0x83, 0xbe, 0x0d, 0x86, 0xc3, 0xc8, 0x89, 0x3a, - 0xa1, 0x18, 0xcd, 0x27, 0xe5, 0x68, 0xd6, 0x59, 0xe9, 
0xd1, 0x41, 0x79, 0x52, 0x55, 0xe3, 0x45, - 0x58, 0x54, 0x40, 0xcf, 0xc0, 0xc8, 0x2e, 0x09, 0x43, 0x67, 0x4b, 0x72, 0xf8, 0x93, 0xa2, 0xee, - 0xc8, 0x2a, 0x2f, 0xc6, 0x12, 0x8e, 0x9e, 0x82, 0x21, 0x12, 0x04, 0x7e, 0x20, 0xf6, 0xe8, 0xb8, - 0x40, 0x1c, 0x5a, 0xa6, 0x85, 0x98, 0xc3, 0xec, 0x7f, 0x63, 0xc1, 0xa4, 0xea, 0x2b, 0x6f, 0xeb, - 0x14, 0x58, 0xb9, 0xb7, 0x01, 0x1a, 0xf2, 0x03, 0x43, 0x76, 0x7b, 0x8c, 0x5e, 0xbb, 0x92, 0xca, - 0xa0, 0x74, 0x0d, 0x63, 0x4c, 0x59, 0x15, 0x85, 0x58, 0xa3, 0x66, 0xff, 0x9a, 0x05, 0x67, 0x12, - 0x5f, 0x74, 0xcb, 0x0d, 0x23, 0xf4, 0x4e, 0xd7, 0x57, 0xcd, 0x0d, 0xf6, 0x55, 0xb4, 0x36, 0xfb, - 0x26, 0xb5, 0x94, 0x65, 0x89, 0xf6, 0x45, 0x37, 0x60, 0xc8, 0x8d, 0xc8, 0xae, 0xfc, 0x98, 0xa7, - 0x7a, 0x7e, 0x0c, 0xef, 0x55, 0x3c, 0x23, 0x55, 0x5a, 0x13, 0x73, 0x02, 0xf6, 0x0f, 0xe5, 0xa1, - 0xc4, 0x97, 0xed, 0xaa, 0xd3, 0x3e, 0x85, 0xb9, 0xa8, 0x42, 0x81, 0x51, 0xe7, 0x1d, 0x7f, 0x3a, - 0xbd, 0xe3, 0xa2, 0x3b, 0x73, 0x54, 0x4e, 0xe3, 0xac, 0xa0, 0xba, 0x1a, 0x68, 0x11, 0x66, 0x24, - 0x90, 0x03, 0xb0, 0xe1, 0x7a, 0x4e, 0xb0, 0x4f, 0xcb, 0x66, 0xf2, 0x8c, 0xe0, 0xf3, 0xbd, 0x09, - 0x2e, 0x2a, 0x7c, 0x4e, 0x56, 0xf5, 0x35, 0x06, 0x60, 0x8d, 0xe8, 0xec, 0x2b, 0x50, 0x52, 0xc8, - 0xc7, 0xe1, 0x71, 0x66, 0x3f, 0x0b, 0x93, 0x89, 0xb6, 0xfa, 0x55, 0x1f, 0xd3, 0x59, 0xa4, 0x5f, - 0x61, 0xa7, 0x80, 0xe8, 0xf5, 0xb2, 0xb7, 0x27, 0x4e, 0xd1, 0xf7, 0xe1, 0x6c, 0x2b, 0xe5, 0x70, - 0x12, 0x53, 0x35, 0xf8, 0x61, 0xf6, 0x84, 0xf8, 0xec, 0xb3, 0x69, 0x50, 0x9c, 0xda, 0x06, 0xbd, - 0xf6, 0xfd, 0x36, 0x5d, 0xf3, 0x4e, 0x8b, 0xf5, 0x57, 0x48, 0xdf, 0xb7, 0x45, 0x19, 0x56, 0x50, - 0x7a, 0x84, 0x9d, 0x55, 0x9d, 0xbf, 0x49, 0xf6, 0xeb, 0xa4, 0x45, 0x1a, 0x91, 0x1f, 0x7c, 0xa8, - 0xdd, 0xbf, 0xc8, 0x47, 0x9f, 0x9f, 0x80, 0xa3, 0x82, 0x40, 0xfe, 0x26, 0xd9, 0xe7, 0x53, 0xa1, - 0x7f, 0x5d, 0xbe, 0xe7, 0xd7, 0xfd, 0x9c, 0x05, 0xe3, 0xea, 0xeb, 0x4e, 0x61, 0xab, 0x2f, 0x9a, - 0x5b, 0xfd, 0x62, 0xcf, 0x05, 0x9e, 0xb1, 0xc9, 0xbf, 0x96, 0x83, 0x0b, 0x0a, 0x87, 0xb2, 0xfb, - 0xfc, 0x8f, 0x58, 0x55, 0xf3, 0x50, 0xf2, 0x94, 0xf6, 0xc0, 0x32, 0xc5, 0xf6, 0x58, 0x77, 0x10, - 0xe3, 0x50, 0xae, 0xcd, 0x8b, 0x45, 0xfc, 0x31, 0x5d, 0xad, 0x26, 0x54, 0x68, 0x8b, 0x90, 0xef, - 0xb8, 0x4d, 0x71, 0x67, 0x7c, 0x4a, 0x8e, 0xf6, 0x9d, 0x6a, 0xe5, 0xe8, 0xa0, 0xfc, 0x64, 0x96, - 0x4a, 0x97, 0x5e, 0x56, 0xe1, 0xdc, 0x9d, 0x6a, 0x05, 0xd3, 0xca, 0x68, 0x01, 0x26, 0xa5, 0xd6, - 0xfa, 0x2e, 0xe5, 0xa0, 0x7c, 0x4f, 0x5c, 0x2d, 0x4a, 0x37, 0x86, 0x4d, 0x30, 0x4e, 0xe2, 0xa3, - 0x0a, 0x4c, 0xed, 0x74, 0x36, 0x48, 0x8b, 0x44, 0xfc, 0x83, 0x6f, 0x12, 0xae, 0x39, 0x2a, 0xc5, - 0xa2, 0xe5, 0xcd, 0x04, 0x1c, 0x77, 0xd5, 0xb0, 0xff, 0x94, 0x1d, 0xf1, 0x62, 0xf4, 0x6a, 0x81, - 0x4f, 0x17, 0x16, 0xa5, 0xfe, 0x61, 0x2e, 0xe7, 0x41, 0x56, 0xc5, 0x4d, 0xb2, 0xbf, 0xee, 0x53, - 0x66, 0x3b, 0x7d, 0x55, 0x18, 0x6b, 0xbe, 0xd0, 0x73, 0xcd, 0xff, 0x42, 0x0e, 0xce, 0xa9, 0x11, - 0x30, 0xf8, 0xba, 0x3f, 0xeb, 0x63, 0xf0, 0x02, 0x8c, 0x36, 0xc9, 0xa6, 0xd3, 0x69, 0x45, 0x4a, - 0x8d, 0x39, 0xc4, 0x55, 0xd9, 0x95, 0xb8, 0x18, 0xeb, 0x38, 0xc7, 0x18, 0xb6, 0x9f, 0x1c, 0x65, - 0x77, 0x6b, 0xe4, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, 0xb9, 0x6b, 0x9e, 0x82, 0x21, 0x77, 0x97, - 0xf2, 0x5a, 0x39, 0x93, 0x85, 0xaa, 0xd2, 0x42, 0xcc, 0x61, 0xe8, 0x13, 0x30, 0xd2, 0xf0, 0x77, - 0x77, 0x1d, 0xaf, 0xc9, 0xae, 0xbc, 0xd2, 0xe2, 0x28, 0x65, 0xc7, 0x96, 0x78, 0x11, 0x96, 0x30, - 0xf4, 0x04, 0x14, 0x9c, 0x60, 0x2b, 0x9c, 0x29, 0x30, 0x9c, 0x22, 0x6d, 0x69, 0x21, 0xd8, 0x0a, - 0x31, 0x2b, 0xa5, 0x52, 0xd5, 0x7d, 0x3f, 0xd8, 0x71, 0xbd, 0xad, 0x8a, 0x1b, 
0x88, 0x2d, 0xa1, - 0xee, 0xc2, 0x7b, 0x0a, 0x82, 0x35, 0x2c, 0xb4, 0x02, 0x43, 0x6d, 0x3f, 0x88, 0xc2, 0x99, 0x61, - 0x36, 0xdc, 0x4f, 0x66, 0x1c, 0x44, 0xfc, 0x6b, 0x6b, 0x7e, 0x10, 0xc5, 0x1f, 0x40, 0xff, 0x85, - 0x98, 0x57, 0x47, 0xdf, 0x06, 0x79, 0xe2, 0xed, 0xcd, 0x8c, 0x30, 0x2a, 0xb3, 0x69, 0x54, 0x96, - 0xbd, 0xbd, 0xbb, 0x4e, 0x10, 0x9f, 0xd2, 0xcb, 0xde, 0x1e, 0xa6, 0x75, 0xd0, 0xe7, 0xa1, 0x24, - 0xb7, 0x78, 0x28, 0xd4, 0x1c, 0xa9, 0x4b, 0x4c, 0x1e, 0x0c, 0x98, 0xbc, 0xd7, 0x71, 0x03, 0xb2, - 0x4b, 0xbc, 0x28, 0x8c, 0xcf, 0x34, 0x09, 0x0d, 0x71, 0x4c, 0x0d, 0x7d, 0x5e, 0xea, 0xd6, 0x56, - 0xfd, 0x8e, 0x17, 0x85, 0x33, 0x25, 0xd6, 0xbd, 0xd4, 0x57, 0x8f, 0xbb, 0x31, 0x5e, 0x52, 0xf9, - 0xc6, 0x2b, 0x63, 0x83, 0x14, 0xc2, 0x30, 0xde, 0x72, 0xf7, 0x88, 0x47, 0xc2, 0xb0, 0x16, 0xf8, - 0x1b, 0x64, 0x06, 0x58, 0xcf, 0x2f, 0xa4, 0x3f, 0x06, 0xf8, 0x1b, 0x64, 0x71, 0xfa, 0xf0, 0xa0, - 0x3c, 0x7e, 0x4b, 0xaf, 0x83, 0x4d, 0x12, 0xe8, 0x0e, 0x4c, 0x50, 0xb9, 0xc6, 0x8d, 0x89, 0x8e, - 0xf6, 0x23, 0xca, 0xa4, 0x0f, 0x6c, 0x54, 0xc2, 0x09, 0x22, 0xe8, 0x4d, 0x28, 0xb5, 0xdc, 0x4d, - 0xd2, 0xd8, 0x6f, 0xb4, 0xc8, 0xcc, 0x18, 0xa3, 0x98, 0xba, 0xad, 0x6e, 0x49, 0x24, 0x2e, 0x17, - 0xa9, 0xbf, 0x38, 0xae, 0x8e, 0xee, 0xc2, 0xf9, 0x88, 0x04, 0xbb, 0xae, 0xe7, 0xd0, 0xed, 0x20, - 0xe4, 0x05, 0xf6, 0xa4, 0x32, 0xce, 0xd6, 0xdb, 0x25, 0x31, 0x74, 0xe7, 0xd7, 0x53, 0xb1, 0x70, - 0x46, 0x6d, 0x74, 0x1b, 0x26, 0xd9, 0x4e, 0xa8, 0x75, 0x5a, 0xad, 0x9a, 0xdf, 0x72, 0x1b, 0xfb, - 0x33, 0x13, 0x8c, 0xe0, 0x27, 0xe4, 0xbd, 0x50, 0x35, 0xc1, 0x47, 0x07, 0x65, 0x88, 0xff, 0xe1, - 0x64, 0x6d, 0xb4, 0xc1, 0x74, 0xe8, 0x9d, 0xc0, 0x8d, 0xf6, 0xe9, 0xfa, 0x25, 0x0f, 0xa2, 0x99, - 0xc9, 0x9e, 0xa2, 0xb0, 0x8e, 0xaa, 0x14, 0xed, 0x7a, 0x21, 0x4e, 0x12, 0xa4, 0x5b, 0x3b, 0x8c, - 0x9a, 0xae, 0x37, 0x33, 0xc5, 0x4e, 0x0c, 0xb5, 0x33, 0xea, 0xb4, 0x10, 0x73, 0x18, 0xd3, 0x9f, - 0xd3, 0x1f, 0xb7, 0xe9, 0x09, 0x3a, 0xcd, 0x10, 0x63, 0xfd, 0xb9, 0x04, 0xe0, 0x18, 0x87, 0x32, - 0x35, 0x51, 0xb4, 0x3f, 0x83, 0x18, 0xaa, 0xda, 0x2e, 0xeb, 0xeb, 0x9f, 0xc7, 0xb4, 0x1c, 0xdd, - 0x82, 0x11, 0xe2, 0xed, 0xad, 0x04, 0xfe, 0xee, 0xcc, 0x99, 0xec, 0x3d, 0xbb, 0xcc, 0x51, 0xf8, - 0x81, 0x1e, 0x0b, 0x78, 0xa2, 0x18, 0x4b, 0x12, 0xe8, 0x01, 0xcc, 0xa4, 0xcc, 0x08, 0x9f, 0x80, - 0xb3, 0x6c, 0x02, 0x5e, 0x17, 0x75, 0x67, 0xd6, 0x33, 0xf0, 0x8e, 0x7a, 0xc0, 0x70, 0x26, 0x75, - 0xf4, 0x05, 0x18, 0xe7, 0x1b, 0x8a, 0x3f, 0xbe, 0x85, 0x33, 0xe7, 0xd8, 0xd7, 0x5c, 0xce, 0xde, - 0x9c, 0x1c, 0x71, 0xf1, 0x9c, 0xe8, 0xd0, 0xb8, 0x5e, 0x1a, 0x62, 0x93, 0x9a, 0xbd, 0x01, 0x13, - 0xea, 0xdc, 0x62, 0x4b, 0x07, 0x95, 0x61, 0x88, 0x71, 0x3b, 0x42, 0xbf, 0x55, 0xa2, 0x33, 0xc5, - 0x38, 0x21, 0xcc, 0xcb, 0xd9, 0x4c, 0xb9, 0xef, 0x93, 0xc5, 0xfd, 0x88, 0x70, 0xa9, 0x3a, 0xaf, - 0xcd, 0x94, 0x04, 0xe0, 0x18, 0xc7, 0xfe, 0x7f, 0x9c, 0x6b, 0x8c, 0x0f, 0xc7, 0x01, 0xae, 0x83, - 0xe7, 0xa0, 0xb8, 0xed, 0x87, 0x11, 0xc5, 0x66, 0x6d, 0x0c, 0xc5, 0x7c, 0xe2, 0x0d, 0x51, 0x8e, - 0x15, 0x06, 0x7a, 0x0d, 0xc6, 0x1b, 0x7a, 0x03, 0xe2, 0x2e, 0x53, 0x43, 0x60, 0xb4, 0x8e, 0x4d, - 0x5c, 0xf4, 0x2a, 0x14, 0xd9, 0xd3, 0x79, 0xc3, 0x6f, 0x09, 0x26, 0x4b, 0x5e, 0xc8, 0xc5, 0x9a, - 0x28, 0x3f, 0xd2, 0x7e, 0x63, 0x85, 0x8d, 0xae, 0xc0, 0x30, 0xed, 0x42, 0xb5, 0x26, 0x6e, 0x11, - 0xa5, 0xaa, 0xb9, 0xc1, 0x4a, 0xb1, 0x80, 0xda, 0x7f, 0x2d, 0xa7, 0x8d, 0x32, 0x95, 0x48, 0x09, - 0xaa, 0xc1, 0xc8, 0x7d, 0xc7, 0x8d, 0x5c, 0x6f, 0x4b, 0xb0, 0x0b, 0xcf, 0xf4, 0xbc, 0x52, 0x58, - 0xa5, 0x7b, 0xbc, 0x02, 0xbf, 0xf4, 0xc4, 0x1f, 0x2c, 0xc9, 0x50, 0x8a, 0x41, 0xc7, 0xf3, 0x28, - 0xc5, 
0xdc, 0xa0, 0x14, 0x31, 0xaf, 0xc0, 0x29, 0x8a, 0x3f, 0x58, 0x92, 0x41, 0xef, 0x00, 0xc8, - 0x65, 0x49, 0x9a, 0xe2, 0xc9, 0xfa, 0xb9, 0xfe, 0x44, 0xd7, 0x55, 0x9d, 0xc5, 0x09, 0x7a, 0xa5, - 0xc6, 0xff, 0xb1, 0x46, 0xcf, 0x8e, 0x18, 0x5b, 0xd5, 0xdd, 0x19, 0xf4, 0x9d, 0xf4, 0x24, 0x70, - 0x82, 0x88, 0x34, 0x17, 0x22, 0x31, 0x38, 0x9f, 0x1c, 0x4c, 0xa6, 0x58, 0x77, 0x77, 0x89, 0x7e, - 0x6a, 0x08, 0x22, 0x38, 0xa6, 0x67, 0xff, 0x52, 0x1e, 0x66, 0xb2, 0xba, 0x4b, 0x17, 0x1d, 0x79, - 0xe0, 0x46, 0x4b, 0x94, 0x1b, 0xb2, 0xcc, 0x45, 0xb7, 0x2c, 0xca, 0xb1, 0xc2, 0xa0, 0xb3, 0x1f, - 0xba, 0x5b, 0x52, 0x24, 0x1c, 0x8a, 0x67, 0xbf, 0xce, 0x4a, 0xb1, 0x80, 0x52, 0xbc, 0x80, 0x38, - 0xa1, 0xb0, 0x89, 0xd0, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40, 0x75, 0x7d, 0x53, 0xa1, 0x8f, 0xbe, - 0xc9, 0x18, 0xa2, 0xa1, 0x93, 0x1d, 0x22, 0xf4, 0x45, 0x80, 0x4d, 0xd7, 0x73, 0xc3, 0x6d, 0x46, - 0x7d, 0xf8, 0xd8, 0xd4, 0x15, 0x2f, 0xb5, 0xa2, 0xa8, 0x60, 0x8d, 0x22, 0x7a, 0x19, 0x46, 0xd5, - 0x06, 0xac, 0x56, 0xd8, 0x03, 0x91, 0xf6, 0xe0, 0x1e, 0x9f, 0x46, 0x15, 0xac, 0xe3, 0xd9, 0xef, - 0x26, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0x83, 0x8e, 0x6f, 0xae, 0xf7, 0xf8, 0xda, 0xbf, - 0x91, 0x87, 0x49, 0xa3, 0xb1, 0x4e, 0x38, 0xc0, 0x99, 0x75, 0x9d, 0xde, 0x73, 0x4e, 0x44, 0xc4, - 0xfe, 0xb3, 0xfb, 0x6f, 0x15, 0xfd, 0x2e, 0xa4, 0x3b, 0x80, 0xd7, 0x47, 0x5f, 0x84, 0x52, 0xcb, - 0x09, 0x99, 0xee, 0x8a, 0x88, 0x7d, 0x37, 0x08, 0xb1, 0x58, 0x8e, 0x70, 0xc2, 0x48, 0xbb, 0x6a, - 0x38, 0xed, 0x98, 0x24, 0xbd, 0x90, 0x29, 0xef, 0x23, 0x8d, 0x6e, 0x54, 0x27, 0x28, 0x83, 0xb4, - 0x8f, 0x39, 0x0c, 0xbd, 0x0a, 0x63, 0x01, 0x61, 0xab, 0x62, 0x89, 0xb2, 0x72, 0x6c, 0x99, 0x0d, - 0xc5, 0x3c, 0x1f, 0xd6, 0x60, 0xd8, 0xc0, 0x8c, 0x59, 0xf9, 0xe1, 0x1e, 0xac, 0xfc, 0x33, 0x30, - 0xc2, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0xa3, 0xca, 0x8b, 0xb1, 0x84, 0x27, 0x17, 0x4c, 0x71, 0xc0, - 0x05, 0xf3, 0x49, 0x98, 0xa8, 0x38, 0x64, 0xd7, 0xf7, 0x96, 0xbd, 0x66, 0xdb, 0x77, 0xbd, 0x08, - 0xcd, 0x40, 0x81, 0xdd, 0x0e, 0x7c, 0x6f, 0x17, 0x28, 0x05, 0x5c, 0xa0, 0x8c, 0xb9, 0xbd, 0x05, - 0xe7, 0x2a, 0xfe, 0x7d, 0xef, 0xbe, 0x13, 0x34, 0x17, 0x6a, 0x55, 0x4d, 0xce, 0x5d, 0x93, 0x72, - 0x16, 0x37, 0x62, 0x49, 0x3d, 0x53, 0xb5, 0x9a, 0xfc, 0xae, 0x5d, 0x71, 0x5b, 0x24, 0x43, 0x1b, - 0xf1, 0x37, 0x72, 0x46, 0x4b, 0x31, 0xbe, 0x7a, 0x30, 0xb2, 0x32, 0x1f, 0x8c, 0xde, 0x82, 0xe2, - 0xa6, 0x4b, 0x5a, 0x4d, 0x4c, 0x36, 0xc5, 0x12, 0x7b, 0x3a, 0xfb, 0x5d, 0x7e, 0x85, 0x62, 0x4a, - 0xed, 0x13, 0x97, 0xd2, 0x56, 0x44, 0x65, 0xac, 0xc8, 0xa0, 0x1d, 0x98, 0x92, 0x62, 0x80, 0x84, - 0x8a, 0x05, 0xf7, 0x4c, 0x2f, 0xd9, 0xc2, 0x24, 0x7e, 0xf6, 0xf0, 0xa0, 0x3c, 0x85, 0x13, 0x64, - 0x70, 0x17, 0x61, 0x2a, 0x96, 0xed, 0xd2, 0xa3, 0xb5, 0xc0, 0x86, 0x9f, 0x89, 0x65, 0x4c, 0xc2, - 0x64, 0xa5, 0xf6, 0x8f, 0x59, 0xf0, 0x58, 0xd7, 0xc8, 0x08, 0x49, 0xfb, 0x84, 0x67, 0x21, 0x29, - 0xf9, 0xe6, 0xfa, 0x4b, 0xbe, 0xf6, 0xdf, 0xb7, 0xe0, 0xec, 0xf2, 0x6e, 0x3b, 0xda, 0xaf, 0xb8, - 0xe6, 0xeb, 0xce, 0x2b, 0x30, 0xbc, 0x4b, 0x9a, 0x6e, 0x67, 0x57, 0xcc, 0x5c, 0x59, 0x1e, 0x3f, - 0xab, 0xac, 0xf4, 0xe8, 0xa0, 0x3c, 0x5e, 0x8f, 0xfc, 0xc0, 0xd9, 0x22, 0xbc, 0x00, 0x0b, 0x74, - 0x76, 0x88, 0xbb, 0xef, 0x93, 0x5b, 0xee, 0xae, 0x2b, 0xed, 0x2c, 0x7a, 0xea, 0xce, 0xe6, 0xe4, - 0x80, 0xce, 0xbd, 0xd5, 0x71, 0xbc, 0xc8, 0x8d, 0xf6, 0xc5, 0xc3, 0x8c, 0x24, 0x82, 0x63, 0x7a, - 0xf6, 0x37, 0x2c, 0x98, 0x94, 0xeb, 0x7e, 0xa1, 0xd9, 0x0c, 0x48, 0x18, 0xa2, 0x59, 0xc8, 0xb9, - 0x6d, 0xd1, 0x4b, 0x10, 0xbd, 0xcc, 0x55, 0x6b, 0x38, 0xe7, 0xb6, 0x51, 0x0d, 0x4a, 0xdc, 0x5c, - 0x23, 0x5e, 0x5c, 0x03, 0x19, 
0x7d, 0xb0, 0x1e, 0xac, 0xcb, 0x9a, 0x38, 0x26, 0x22, 0x39, 0x38, - 0x76, 0x66, 0xe6, 0xcd, 0x57, 0xaf, 0x1b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x15, 0x8a, 0x9e, 0xdf, - 0xe4, 0xd6, 0x33, 0xfc, 0xf6, 0x63, 0x4b, 0x76, 0x4d, 0x94, 0x61, 0x05, 0xb5, 0x7f, 0xd0, 0x82, - 0x31, 0xf9, 0x65, 0x03, 0x32, 0x93, 0x74, 0x6b, 0xc5, 0x8c, 0x64, 0xbc, 0xb5, 0x28, 0x33, 0xc8, - 0x20, 0x06, 0x0f, 0x98, 0x3f, 0x0e, 0x0f, 0x68, 0xff, 0x68, 0x0e, 0x26, 0x64, 0x77, 0xea, 0x9d, - 0x8d, 0x90, 0x44, 0x68, 0x1d, 0x4a, 0x0e, 0x1f, 0x72, 0x22, 0x57, 0xec, 0x53, 0xe9, 0xc2, 0x87, - 0x31, 0x3f, 0xf1, 0xb5, 0xbc, 0x20, 0x6b, 0xe3, 0x98, 0x10, 0x6a, 0xc1, 0xb4, 0xe7, 0x47, 0xec, - 0x88, 0x56, 0xf0, 0x5e, 0x4f, 0x20, 0x49, 0xea, 0x17, 0x04, 0xf5, 0xe9, 0xb5, 0x24, 0x15, 0xdc, - 0x4d, 0x18, 0x2d, 0x4b, 0x85, 0x47, 0x3e, 0x5b, 0xdc, 0xd0, 0x67, 0x21, 0x5d, 0xdf, 0x61, 0xff, - 0xaa, 0x05, 0x25, 0x89, 0x76, 0x1a, 0xaf, 0x5d, 0xab, 0x30, 0x12, 0xb2, 0x49, 0x90, 0x43, 0x63, - 0xf7, 0xea, 0x38, 0x9f, 0xaf, 0xf8, 0xe6, 0xe1, 0xff, 0x43, 0x2c, 0x69, 0x30, 0x7d, 0xb7, 0xea, - 0xfe, 0x47, 0x44, 0xdf, 0xad, 0xfa, 0x93, 0x71, 0xc3, 0xfc, 0x57, 0xd6, 0x67, 0x4d, 0xac, 0xa5, - 0x0c, 0x52, 0x3b, 0x20, 0x9b, 0xee, 0x83, 0x24, 0x83, 0x54, 0x63, 0xa5, 0x58, 0x40, 0xd1, 0x3b, - 0x30, 0xd6, 0x90, 0x8a, 0xce, 0xf8, 0x18, 0xb8, 0xd2, 0x53, 0xe9, 0xae, 0xde, 0x67, 0xb8, 0x65, - 0xed, 0x92, 0x56, 0x1f, 0x1b, 0xd4, 0xcc, 0xe7, 0xf6, 0x7c, 0xbf, 0xe7, 0xf6, 0x98, 0x6e, 0xf6, - 0xe3, 0xf3, 0x8f, 0x5b, 0x30, 0xcc, 0xd5, 0x65, 0x83, 0xe9, 0x17, 0xb5, 0xe7, 0xaa, 0x78, 0xec, - 0xee, 0xd2, 0x42, 0xf1, 0xfc, 0x84, 0x56, 0xa1, 0xc4, 0x7e, 0x30, 0xb5, 0x41, 0x3e, 0xdb, 0xa4, - 0x98, 0xb7, 0xaa, 0x77, 0xf0, 0xae, 0xac, 0x86, 0x63, 0x0a, 0xf6, 0x0f, 0xe7, 0xe9, 0x51, 0x15, - 0xa3, 0x1a, 0x37, 0xb8, 0xf5, 0xe8, 0x6e, 0xf0, 0xdc, 0xa3, 0xba, 0xc1, 0xb7, 0x60, 0xb2, 0xa1, - 0x3d, 0x6e, 0xc5, 0x33, 0x79, 0xb5, 0xe7, 0x22, 0xd1, 0xde, 0xc1, 0xb8, 0xca, 0x68, 0xc9, 0x24, - 0x82, 0x93, 0x54, 0xd1, 0x77, 0xc2, 0x18, 0x9f, 0x67, 0xd1, 0x0a, 0xb7, 0x58, 0xf8, 0x44, 0xf6, - 0x7a, 0xd1, 0x9b, 0x60, 0x2b, 0xb1, 0xae, 0x55, 0xc7, 0x06, 0x31, 0xfb, 0x97, 0x8a, 0x30, 0xb4, - 0xbc, 0x47, 0xbc, 0xe8, 0x14, 0x0e, 0xa4, 0x06, 0x4c, 0xb8, 0xde, 0x9e, 0xdf, 0xda, 0x23, 0x4d, - 0x0e, 0x3f, 0xce, 0xe5, 0x7a, 0x5e, 0x90, 0x9e, 0xa8, 0x1a, 0x24, 0x70, 0x82, 0xe4, 0xa3, 0x90, - 0x30, 0xaf, 0xc3, 0x30, 0x9f, 0x7b, 0x21, 0x5e, 0xa6, 0x2a, 0x83, 0xd9, 0x20, 0x8a, 0x5d, 0x10, - 0x4b, 0xbf, 0x5c, 0xfb, 0x2c, 0xaa, 0xa3, 0x77, 0x61, 0x62, 0xd3, 0x0d, 0xc2, 0x88, 0x8a, 0x86, - 0x61, 0xe4, 0xec, 0xb6, 0x1f, 0x42, 0xa2, 0x54, 0xe3, 0xb0, 0x62, 0x50, 0xc2, 0x09, 0xca, 0x68, - 0x0b, 0xc6, 0xa9, 0x90, 0x13, 0x37, 0x35, 0x72, 0xec, 0xa6, 0x94, 0xca, 0xe8, 0x96, 0x4e, 0x08, - 0x9b, 0x74, 0xe9, 0x61, 0xd2, 0x60, 0x42, 0x51, 0x91, 0x71, 0x14, 0xea, 0x30, 0xe1, 0xd2, 0x10, - 0x87, 0xd1, 0x33, 0x89, 0x99, 0xad, 0x94, 0xcc, 0x33, 0x49, 0x33, 0x4e, 0xf9, 0x12, 0x94, 0x08, - 0x1d, 0x42, 0x4a, 0x58, 0x28, 0xc6, 0xe7, 0x07, 0xeb, 0xeb, 0xaa, 0xdb, 0x08, 0x7c, 0x53, 0x96, - 0x5f, 0x96, 0x94, 0x70, 0x4c, 0x14, 0x2d, 0xc1, 0x70, 0x48, 0x02, 0x97, 0x84, 0x42, 0x45, 0xde, - 0x63, 0x1a, 0x19, 0x1a, 0xb7, 0x3d, 0xe7, 0xbf, 0xb1, 0xa8, 0x4a, 0x97, 0x97, 0xc3, 0xa4, 0x21, - 0xa6, 0x15, 0xd7, 0x96, 0xd7, 0x02, 0x2b, 0xc5, 0x02, 0x8a, 0xde, 0x84, 0x91, 0x80, 0xb4, 0x98, - 0xb2, 0x68, 0x7c, 0xf0, 0x45, 0xce, 0x75, 0x4f, 0xbc, 0x1e, 0x96, 0x04, 0xd0, 0x4d, 0x40, 0x01, - 0xa1, 0x3c, 0x84, 0xeb, 0x6d, 0x29, 0x63, 0x0e, 0xa1, 0xeb, 0x7e, 0x5c, 0xb4, 0x7f, 0x06, 0xc7, - 0x18, 0xd2, 0x2a, 0x15, 0xa7, 0x54, 0x43, 0xd7, 0x61, 
0x5a, 0x95, 0x56, 0xbd, 0x30, 0x72, 0xbc, - 0x06, 0x61, 0x6a, 0xee, 0x52, 0xcc, 0x15, 0xe1, 0x24, 0x02, 0xee, 0xae, 0x63, 0xff, 0x0c, 0x65, - 0x67, 0xe8, 0x68, 0x9d, 0x02, 0x2f, 0xf0, 0x86, 0xc9, 0x0b, 0x5c, 0xc8, 0x9c, 0xb9, 0x0c, 0x3e, - 0xe0, 0xd0, 0x82, 0x51, 0x6d, 0x66, 0xe3, 0x35, 0x6b, 0xf5, 0x58, 0xb3, 0x1d, 0x98, 0xa2, 0x2b, - 0xfd, 0xf6, 0x46, 0x48, 0x82, 0x3d, 0xd2, 0x64, 0x0b, 0x33, 0xf7, 0x70, 0x0b, 0x53, 0xbd, 0x32, - 0xdf, 0x4a, 0x10, 0xc4, 0x5d, 0x4d, 0xa0, 0x57, 0xa4, 0xe6, 0x24, 0x6f, 0x18, 0x69, 0x71, 0xad, - 0xc8, 0xd1, 0x41, 0x79, 0x4a, 0xfb, 0x10, 0x5d, 0x53, 0x62, 0x7f, 0x49, 0x7e, 0xa3, 0x7a, 0xcd, - 0x6f, 0xa8, 0xc5, 0x92, 0x78, 0xcd, 0x57, 0xcb, 0x01, 0xc7, 0x38, 0x74, 0x8f, 0x52, 0x11, 0x24, - 0xf9, 0x9a, 0x4f, 0x05, 0x14, 0xcc, 0x20, 0xf6, 0x8b, 0x00, 0xcb, 0x0f, 0x48, 0x83, 0x2f, 0x75, - 0xfd, 0x01, 0xd2, 0xca, 0x7e, 0x80, 0xb4, 0xff, 0x9d, 0x05, 0x13, 0x2b, 0x4b, 0x86, 0x98, 0x38, - 0x07, 0xc0, 0x65, 0xa3, 0x7b, 0xf7, 0xd6, 0xa4, 0x6e, 0x9d, 0xab, 0x47, 0x55, 0x29, 0xd6, 0x30, - 0xd0, 0x05, 0xc8, 0xb7, 0x3a, 0x9e, 0x10, 0x59, 0x46, 0x0e, 0x0f, 0xca, 0xf9, 0x5b, 0x1d, 0x0f, - 0xd3, 0x32, 0xcd, 0x42, 0x30, 0x3f, 0xb0, 0x85, 0x60, 0x5f, 0xf7, 0x2a, 0x54, 0x86, 0xa1, 0xfb, - 0xf7, 0xdd, 0x26, 0x37, 0x62, 0x17, 0x7a, 0xff, 0x7b, 0xf7, 0xaa, 0x95, 0x10, 0xf3, 0x72, 0xfb, - 0xab, 0x79, 0x98, 0x5d, 0x69, 0x91, 0x07, 0x1f, 0xd0, 0x90, 0x7f, 0x50, 0xfb, 0xc6, 0xe3, 0xf1, - 0x8b, 0xc7, 0xb5, 0x61, 0xed, 0x3f, 0x1e, 0x9b, 0x30, 0xc2, 0x1f, 0xb3, 0xa5, 0x59, 0xff, 0x6b, - 0x69, 0xad, 0x67, 0x0f, 0xc8, 0x1c, 0x7f, 0x14, 0x17, 0xe6, 0xfc, 0xea, 0xa6, 0x15, 0xa5, 0x58, - 0x12, 0x9f, 0xfd, 0x0c, 0x8c, 0xe9, 0x98, 0xc7, 0xb2, 0x26, 0xff, 0x0b, 0x79, 0x98, 0xa2, 0x3d, - 0x78, 0xa4, 0x13, 0x71, 0xa7, 0x7b, 0x22, 0x4e, 0xda, 0xa2, 0xb8, 0xff, 0x6c, 0xbc, 0x93, 0x9c, - 0x8d, 0x17, 0xb2, 0x66, 0xe3, 0xb4, 0xe7, 0xe0, 0x7b, 0x2d, 0x38, 0xb3, 0xd2, 0xf2, 0x1b, 0x3b, - 0x09, 0xab, 0xdf, 0x97, 0x61, 0x94, 0x9e, 0xe3, 0xa1, 0xe1, 0x45, 0x64, 0xf8, 0x95, 0x09, 0x10, - 0xd6, 0xf1, 0xb4, 0x6a, 0x77, 0xee, 0x54, 0x2b, 0x69, 0xee, 0x68, 0x02, 0x84, 0x75, 0x3c, 0xfb, - 0xeb, 0x16, 0x5c, 0xbc, 0xbe, 0xb4, 0x1c, 0x2f, 0xc5, 0x2e, 0x8f, 0x38, 0x2a, 0x05, 0x36, 0xb5, - 0xae, 0xc4, 0x52, 0x60, 0x85, 0xf5, 0x42, 0x40, 0x3f, 0x2a, 0xde, 0x9e, 0x3f, 0x6d, 0xc1, 0x99, - 0xeb, 0x6e, 0x44, 0xaf, 0xe5, 0xa4, 0x6f, 0x16, 0xbd, 0x97, 0x43, 0x37, 0xf2, 0x83, 0xfd, 0xa4, - 0x6f, 0x16, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x96, 0xf7, 0x5c, 0x66, 0x46, 0x95, 0x33, 0x55, 0x51, - 0x58, 0x94, 0x63, 0x85, 0x41, 0x3f, 0xac, 0xe9, 0x06, 0x4c, 0x94, 0xd8, 0x17, 0x27, 0xac, 0xfa, - 0xb0, 0x8a, 0x04, 0xe0, 0x18, 0xc7, 0xfe, 0x31, 0x0b, 0xce, 0x5d, 0x6f, 0x75, 0xc2, 0x88, 0x04, - 0x9b, 0xa1, 0xd1, 0xd9, 0x17, 0xa1, 0x44, 0xa4, 0xb8, 0x2e, 0xfa, 0xaa, 0x18, 0x4c, 0x25, 0xc7, - 0x73, 0xc7, 0x30, 0x85, 0x37, 0x80, 0xe7, 0xc0, 0xf1, 0x5c, 0xc7, 0x7e, 0x3e, 0x07, 0xe3, 0x37, - 0xd6, 0xd7, 0x6b, 0xd7, 0x49, 0x24, 0x6e, 0xb1, 0xfe, 0xaa, 0x66, 0xac, 0x69, 0xcc, 0x7a, 0x09, - 0x45, 0x9d, 0xc8, 0x6d, 0xcd, 0x71, 0x4f, 0xe4, 0xb9, 0xaa, 0x17, 0xdd, 0x0e, 0xea, 0x51, 0xe0, - 0x7a, 0x5b, 0xa9, 0x3a, 0x36, 0x79, 0xd7, 0xe6, 0xb3, 0xee, 0x5a, 0xf4, 0x22, 0x0c, 0x33, 0x57, - 0x68, 0x29, 0x9e, 0x3c, 0xae, 0x64, 0x0a, 0x56, 0x7a, 0x74, 0x50, 0x2e, 0xdd, 0xc1, 0x55, 0xfe, - 0x07, 0x0b, 0x54, 0x74, 0x07, 0x46, 0xb7, 0xa3, 0xa8, 0x7d, 0x83, 0x38, 0x4d, 0x12, 0xc8, 0xd3, - 0xe1, 0x52, 0xda, 0xe9, 0x40, 0x07, 0x81, 0xa3, 0xc5, 0x1b, 0x2a, 0x2e, 0x0b, 0xb1, 0x4e, 0xc7, - 0xae, 0x03, 0xc4, 0xb0, 0x13, 0xd2, 0x2f, 0xd8, 0x7f, 0x60, 0xc1, 0x08, 0xf7, 
0x4a, 0x0b, 0xd0, - 0xeb, 0x50, 0x20, 0x0f, 0x48, 0x43, 0x70, 0x8e, 0xa9, 0x1d, 0x8e, 0x19, 0x0f, 0xae, 0x2d, 0xa7, - 0xff, 0x31, 0xab, 0x85, 0x6e, 0xc0, 0x08, 0xed, 0xed, 0x75, 0xe5, 0xa2, 0xf7, 0x64, 0xd6, 0x17, - 0xab, 0x69, 0xe7, 0xbc, 0x8a, 0x28, 0xc2, 0xb2, 0x3a, 0xd3, 0xfc, 0x36, 0xda, 0x75, 0x7a, 0x80, - 0x45, 0xbd, 0xee, 0xd9, 0xf5, 0xa5, 0x1a, 0x47, 0x12, 0xd4, 0xb8, 0xe6, 0x57, 0x16, 0xe2, 0x98, - 0x88, 0xbd, 0x0e, 0x25, 0x3a, 0xa9, 0x0b, 0x2d, 0xd7, 0xe9, 0xad, 0x74, 0x7e, 0x16, 0x4a, 0x52, - 0x01, 0x1c, 0x0a, 0xc7, 0x26, 0x46, 0x55, 0xea, 0x87, 0x43, 0x1c, 0xc3, 0xed, 0x4d, 0x38, 0xcb, - 0x5e, 0xfe, 0x9d, 0x68, 0xdb, 0xd8, 0x63, 0xfd, 0x17, 0xf3, 0x73, 0x42, 0x10, 0xe3, 0x33, 0x33, - 0xa3, 0xf9, 0x0e, 0x8c, 0x49, 0x8a, 0xb1, 0x50, 0x66, 0xff, 0x61, 0x01, 0x1e, 0xaf, 0xd6, 0xb3, - 0x1d, 0x16, 0x5f, 0x85, 0x31, 0xce, 0xa6, 0xd1, 0xa5, 0xed, 0xb4, 0x44, 0xbb, 0xea, 0x5d, 0x6c, - 0x5d, 0x83, 0x61, 0x03, 0x13, 0x5d, 0x84, 0xbc, 0xfb, 0x9e, 0x97, 0x34, 0xc3, 0xad, 0xbe, 0xb5, - 0x86, 0x69, 0x39, 0x05, 0x53, 0x8e, 0x8f, 0x1f, 0xa5, 0x0a, 0xac, 0xb8, 0xbe, 0x37, 0x60, 0xc2, - 0x0d, 0x1b, 0xa1, 0x5b, 0xf5, 0xe8, 0x39, 0x13, 0x3b, 0xbb, 0xc6, 0x4a, 0x02, 0xda, 0x69, 0x05, - 0xc5, 0x09, 0x6c, 0xed, 0x5c, 0x1f, 0x1a, 0x98, 0x6b, 0xec, 0xeb, 0xe9, 0x43, 0x19, 0xe2, 0x36, - 0xfb, 0xba, 0x90, 0x19, 0xb5, 0x09, 0x86, 0x98, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x4a, 0x60, 0x8d, - 0x6d, 0xa7, 0xbd, 0xd0, 0x89, 0xb6, 0x2b, 0x6e, 0xd8, 0xf0, 0xf7, 0x48, 0xb0, 0xcf, 0x84, 0xe7, - 0x62, 0x2c, 0x81, 0x29, 0xc0, 0xd2, 0x8d, 0x85, 0x1a, 0xc5, 0xc4, 0xdd, 0x75, 0x4c, 0xae, 0x10, - 0x4e, 0x82, 0x2b, 0x5c, 0x80, 0x49, 0xd9, 0x4c, 0x9d, 0x84, 0xec, 0x8e, 0x18, 0x65, 0x1d, 0x53, - 0xa6, 0xb6, 0xa2, 0x58, 0x75, 0x2b, 0x89, 0x8f, 0x5e, 0x81, 0x71, 0xd7, 0x73, 0x23, 0xd7, 0x89, - 0xfc, 0x80, 0xdd, 0xb0, 0x5c, 0x4e, 0x66, 0x96, 0x6c, 0x55, 0x1d, 0x80, 0x4d, 0x3c, 0xfb, 0x3f, - 0x15, 0x60, 0x9a, 0x4d, 0xdb, 0xb7, 0x56, 0xd8, 0x47, 0x66, 0x85, 0xdd, 0xe9, 0x5e, 0x61, 0x27, - 0xc1, 0xee, 0x7e, 0x98, 0xcb, 0xec, 0x5d, 0x28, 0x29, 0x5b, 0x60, 0xe9, 0x0c, 0x60, 0x65, 0x38, - 0x03, 0xf4, 0xe7, 0x3e, 0xe4, 0x33, 0x6e, 0x3e, 0xf5, 0x19, 0xf7, 0x6f, 0x59, 0x10, 0x9b, 0x44, - 0xa2, 0x1b, 0x50, 0x6a, 0xfb, 0xcc, 0xec, 0x20, 0x90, 0xb6, 0x3c, 0x8f, 0xa7, 0x5e, 0x54, 0xfc, - 0x52, 0xe4, 0xe3, 0x57, 0x93, 0x35, 0x70, 0x5c, 0x19, 0x2d, 0xc2, 0x48, 0x3b, 0x20, 0xf5, 0x88, - 0xb9, 0xc0, 0xf6, 0xa5, 0xc3, 0xd7, 0x08, 0xc7, 0xc7, 0xb2, 0xa2, 0xfd, 0x0b, 0x16, 0x00, 0x7f, - 0x29, 0x75, 0xbc, 0x2d, 0x72, 0x0a, 0xda, 0xdf, 0x0a, 0x14, 0xc2, 0x36, 0x69, 0xf4, 0x32, 0x08, - 0x89, 0xfb, 0x53, 0x6f, 0x93, 0x46, 0x3c, 0xe0, 0xf4, 0x1f, 0x66, 0xb5, 0xed, 0xef, 0x03, 0x98, - 0x88, 0xd1, 0xaa, 0x11, 0xd9, 0x45, 0xcf, 0x1b, 0x2e, 0x71, 0x17, 0x12, 0x2e, 0x71, 0x25, 0x86, - 0xad, 0x29, 0x1a, 0xdf, 0x85, 0xfc, 0xae, 0xf3, 0x40, 0x68, 0x92, 0x9e, 0xed, 0xdd, 0x0d, 0x4a, - 0x7f, 0x6e, 0xd5, 0x79, 0xc0, 0x65, 0xa6, 0x67, 0xe5, 0x02, 0x59, 0x75, 0x1e, 0x1c, 0x71, 0xb3, - 0x0f, 0x76, 0x48, 0xdd, 0x72, 0xc3, 0xe8, 0xcb, 0xff, 0x31, 0xfe, 0xcf, 0x96, 0x1d, 0x6d, 0x84, - 0xb5, 0xe5, 0x7a, 0xe2, 0xdd, 0x70, 0xa0, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0x0d, 0xd0, 0x96, - 0xeb, 0xa1, 0xf7, 0x61, 0x44, 0xbc, 0xd1, 0x33, 0x5b, 0x6f, 0x53, 0x4b, 0x95, 0xd5, 0x9e, 0x78, - 0xe2, 0xe7, 0x6d, 0xce, 0x4b, 0x99, 0x50, 0x94, 0xf6, 0x6d, 0x57, 0x36, 0x88, 0xfe, 0xba, 0x05, - 0x13, 0xe2, 0x37, 0x26, 0xef, 0x75, 0x48, 0x18, 0x09, 0xde, 0xf3, 0xd3, 0x83, 0xf7, 0x41, 0x54, - 0xe4, 0x5d, 0xf9, 0xb4, 0x3c, 0x66, 0x4d, 0x60, 0xdf, 0x1e, 0x25, 0x7a, 0x81, 0xfe, 0xa1, 0x05, - 0x67, 
0x77, 0x9d, 0x07, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0xed, 0xfa, 0xeb, 0x83, - 0x4d, 0x7f, 0x57, 0x75, 0xde, 0x49, 0x69, 0xe6, 0x7a, 0x36, 0x0d, 0xa5, 0x6f, 0x57, 0x53, 0xfb, - 0x35, 0xbb, 0x09, 0x45, 0xb9, 0xde, 0x52, 0x24, 0xef, 0x8a, 0xce, 0x58, 0x1f, 0xdb, 0x44, 0x42, - 0xf7, 0x4b, 0xa3, 0xed, 0x88, 0xb5, 0xf6, 0x48, 0xdb, 0x79, 0x17, 0xc6, 0xf4, 0x35, 0xf6, 0x48, - 0xdb, 0x7a, 0x0f, 0xce, 0xa4, 0xac, 0xa5, 0x47, 0xda, 0xe4, 0x7d, 0xb8, 0x90, 0xb9, 0x3e, 0x1e, - 0x65, 0xc3, 0xf6, 0xcf, 0x5b, 0xfa, 0x39, 0x78, 0x0a, 0x2a, 0xf8, 0x25, 0x53, 0x05, 0x7f, 0xa9, - 0xf7, 0xce, 0xc9, 0xd0, 0xc3, 0xbf, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, 0x4d, 0x18, 0x6e, 0xd1, - 0x12, 0x69, 0x1c, 0x62, 0xf7, 0xdf, 0x91, 0x31, 0x2f, 0xc5, 0xca, 0x43, 0x2c, 0x28, 0xd8, 0xbf, - 0x6c, 0x41, 0xe1, 0x14, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x7c, 0x26, 0x69, 0x11, 0xd2, 0x6c, 0x0e, - 0x3b, 0xf7, 0x97, 0x1f, 0x44, 0xc4, 0x0b, 0x99, 0xa8, 0x98, 0x3a, 0x30, 0xdf, 0x05, 0x67, 0x6e, - 0xf9, 0x4e, 0x73, 0xd1, 0x69, 0x39, 0x5e, 0x83, 0x04, 0x55, 0x6f, 0xab, 0xaf, 0x95, 0x92, 0x6e, - 0x53, 0x94, 0xeb, 0x67, 0x53, 0x64, 0x6f, 0x03, 0xd2, 0x1b, 0x10, 0x76, 0x9c, 0x18, 0x46, 0x5c, - 0xde, 0x94, 0x18, 0xfe, 0xa7, 0xd3, 0xb9, 0xbb, 0xae, 0x9e, 0x69, 0x16, 0x8a, 0xbc, 0x00, 0x4b, - 0x42, 0xf6, 0xab, 0x90, 0xea, 0xbb, 0xd5, 0x5f, 0x6d, 0x60, 0x7f, 0x1e, 0xa6, 0x59, 0xcd, 0x63, - 0x8a, 0xb4, 0x76, 0x42, 0x49, 0x97, 0x12, 0x32, 0xca, 0xfe, 0x8a, 0x05, 0x93, 0x6b, 0x89, 0xf8, - 0x15, 0x57, 0xd8, 0x7b, 0x60, 0x8a, 0x6e, 0xb8, 0xce, 0x4a, 0xb1, 0x80, 0x9e, 0xb8, 0x0e, 0xea, - 0x4f, 0x2d, 0x88, 0xdd, 0x29, 0x4f, 0x81, 0xf1, 0x5a, 0x32, 0x18, 0xaf, 0x54, 0xdd, 0x88, 0xea, - 0x4e, 0x16, 0xdf, 0x85, 0x6e, 0xaa, 0xd8, 0x01, 0x3d, 0xd4, 0x22, 0x31, 0x19, 0xee, 0x69, 0x3e, - 0x61, 0x06, 0x18, 0x90, 0xd1, 0x04, 0x98, 0x29, 0x91, 0xc2, 0xfd, 0x88, 0x98, 0x12, 0xa9, 0xfe, - 0x64, 0xec, 0xd0, 0x9a, 0xd6, 0x65, 0x76, 0x72, 0x7d, 0x3b, 0x33, 0x0d, 0x77, 0x5a, 0xee, 0xfb, - 0x44, 0x05, 0x40, 0x29, 0x0b, 0x53, 0x6f, 0x51, 0x7a, 0x74, 0x50, 0x1e, 0x57, 0xff, 0x78, 0x94, - 0xac, 0xb8, 0x8a, 0x7d, 0x03, 0x26, 0x13, 0x03, 0x86, 0x5e, 0x86, 0xa1, 0xf6, 0xb6, 0x13, 0x92, - 0x84, 0xf9, 0xe4, 0x50, 0x8d, 0x16, 0x1e, 0x1d, 0x94, 0x27, 0x54, 0x05, 0x56, 0x82, 0x39, 0xb6, - 0xfd, 0x3f, 0x2d, 0x28, 0xac, 0xf9, 0xcd, 0xd3, 0x58, 0x4c, 0x6f, 0x18, 0x8b, 0xe9, 0x89, 0xac, - 0x18, 0x83, 0x99, 0xeb, 0x68, 0x25, 0xb1, 0x8e, 0x2e, 0x65, 0x52, 0xe8, 0xbd, 0x84, 0x76, 0x61, - 0x94, 0x45, 0x2e, 0x14, 0xe6, 0x9c, 0x2f, 0x1a, 0x32, 0x40, 0x39, 0x21, 0x03, 0x4c, 0x6a, 0xa8, - 0x9a, 0x24, 0xf0, 0x0c, 0x8c, 0x08, 0x93, 0xc2, 0xa4, 0x11, 0xbc, 0xc0, 0xc5, 0x12, 0x6e, 0xff, - 0x78, 0x1e, 0x8c, 0x48, 0x89, 0xe8, 0x57, 0x2d, 0x98, 0x0b, 0xb8, 0x57, 0x61, 0xb3, 0xd2, 0x09, - 0x5c, 0x6f, 0xab, 0xde, 0xd8, 0x26, 0xcd, 0x4e, 0xcb, 0xf5, 0xb6, 0xaa, 0x5b, 0x9e, 0xaf, 0x8a, - 0x97, 0x1f, 0x90, 0x46, 0x87, 0xbd, 0x0b, 0xf4, 0x09, 0xcb, 0xa8, 0x4c, 0x76, 0xae, 0x1d, 0x1e, - 0x94, 0xe7, 0xf0, 0xb1, 0x68, 0xe3, 0x63, 0xf6, 0x05, 0x7d, 0xdd, 0x82, 0x79, 0x1e, 0x40, 0x70, - 0xf0, 0xfe, 0xf7, 0x90, 0x98, 0x6a, 0x92, 0x54, 0x4c, 0x64, 0x9d, 0x04, 0xbb, 0x8b, 0xaf, 0x88, - 0x01, 0x9d, 0xaf, 0x1d, 0xaf, 0x2d, 0x7c, 0xdc, 0xce, 0xd9, 0xff, 0x22, 0x0f, 0xe3, 0xc2, 0xa1, - 0x5d, 0x44, 0x4a, 0x79, 0xd9, 0x58, 0x12, 0x4f, 0x26, 0x96, 0xc4, 0xb4, 0x81, 0x7c, 0x32, 0x41, - 0x52, 0x42, 0x98, 0x6e, 0x39, 0x61, 0x74, 0x83, 0x38, 0x41, 0xb4, 0x41, 0x1c, 0x6e, 0xca, 0x92, - 0x3f, 0xb6, 0xd9, 0x8d, 0x52, 0xd1, 0xdc, 0x4a, 0x12, 0xc3, 0xdd, 0xf4, 0xd1, 0x1e, 0x20, 0x66, - 0x8f, 0x13, 0x38, 0x5e, 0xc8, 
0xbf, 0xc5, 0x15, 0x6f, 0x06, 0xc7, 0x6b, 0x75, 0x56, 0xb4, 0x8a, - 0x6e, 0x75, 0x51, 0xc3, 0x29, 0x2d, 0x68, 0x76, 0x56, 0x43, 0x83, 0xda, 0x59, 0x0d, 0xf7, 0xf1, - 0x34, 0xf1, 0x60, 0xaa, 0x2b, 0x26, 0xc1, 0xdb, 0x50, 0x52, 0xf6, 0x70, 0xe2, 0xd0, 0xe9, 0x1d, - 0xda, 0x23, 0x49, 0x81, 0xab, 0x51, 0x62, 0x5b, 0xcc, 0x98, 0x9c, 0xfd, 0x8f, 0x72, 0x46, 0x83, - 0x7c, 0x12, 0xd7, 0xa0, 0xe8, 0x84, 0xa1, 0xbb, 0xe5, 0x91, 0xa6, 0xd8, 0xb1, 0x1f, 0xcf, 0xda, - 0xb1, 0x46, 0x33, 0xcc, 0x26, 0x71, 0x41, 0xd4, 0xc4, 0x8a, 0x06, 0xba, 0xc1, 0x0d, 0x86, 0xf6, - 0x24, 0xcf, 0x3f, 0x18, 0x35, 0x90, 0x26, 0x45, 0x7b, 0x04, 0x8b, 0xfa, 0xe8, 0x0b, 0xdc, 0xa2, - 0xeb, 0xa6, 0xe7, 0xdf, 0xf7, 0xae, 0xfb, 0xbe, 0xf4, 0x42, 0x1b, 0x8c, 0xe0, 0xb4, 0xb4, 0xe3, - 0x52, 0xd5, 0xb1, 0x49, 0x6d, 0xb0, 0xb8, 0x3d, 0xdf, 0x0d, 0x67, 0x28, 0x69, 0xd3, 0x97, 0x24, - 0x44, 0x04, 0x26, 0x45, 0xb4, 0x04, 0x59, 0x26, 0xc6, 0x2e, 0x95, 0x9d, 0x37, 0x6b, 0xc7, 0x4a, - 0xbf, 0x9b, 0x26, 0x09, 0x9c, 0xa4, 0x69, 0xff, 0x94, 0x05, 0xcc, 0x0a, 0xfe, 0x14, 0x58, 0x86, - 0xcf, 0x9a, 0x2c, 0xc3, 0x4c, 0xd6, 0x20, 0x67, 0x70, 0x0b, 0x2f, 0xf1, 0x95, 0x55, 0x0b, 0xfc, - 0x07, 0xfb, 0xe2, 0x35, 0xbd, 0x3f, 0x27, 0x6b, 0xff, 0x5f, 0x8b, 0x1f, 0x62, 0xca, 0x31, 0x1d, - 0x7d, 0x0f, 0x14, 0x1b, 0x4e, 0xdb, 0x69, 0xf0, 0xb0, 0xbe, 0x99, 0x5a, 0x1d, 0xa3, 0xd2, 0xdc, - 0x92, 0xa8, 0xc1, 0xb5, 0x14, 0x32, 0xea, 0x46, 0x51, 0x16, 0xf7, 0xd5, 0x4c, 0xa8, 0x26, 0x67, - 0x77, 0x60, 0xdc, 0x20, 0xf6, 0x48, 0x45, 0xda, 0xef, 0xe1, 0x57, 0xac, 0x8a, 0x12, 0xb3, 0x0b, - 0xd3, 0x9e, 0xf6, 0x9f, 0x5e, 0x28, 0x52, 0x4c, 0xf9, 0x78, 0xbf, 0x4b, 0x94, 0xdd, 0x3e, 0x9a, - 0x95, 0x7f, 0x82, 0x0c, 0xee, 0xa6, 0x6c, 0xff, 0x84, 0x05, 0x8f, 0xe9, 0x88, 0x5a, 0xcc, 0x80, - 0x7e, 0x7a, 0xe2, 0x0a, 0x14, 0xfd, 0x36, 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xad, 0x71, 0x55, 0x0e, - 0xfa, 0x6d, 0x51, 0x7e, 0x24, 0xe2, 0x2b, 0x4a, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0x54, 0x8e, 0x61, - 0x83, 0x11, 0x8a, 0x78, 0x0e, 0xec, 0x0c, 0x60, 0x4f, 0xa6, 0x21, 0x16, 0x10, 0xfb, 0x0f, 0x2d, - 0xbe, 0xb0, 0xf4, 0xae, 0xa3, 0xf7, 0x60, 0x6a, 0xd7, 0x89, 0x1a, 0xdb, 0xcb, 0x0f, 0xda, 0x01, - 0x57, 0x8f, 0xcb, 0x71, 0x7a, 0xb6, 0xdf, 0x38, 0x69, 0x1f, 0x19, 0x1b, 0xa9, 0xad, 0x26, 0x88, - 0xe1, 0x2e, 0xf2, 0x68, 0x03, 0x46, 0x59, 0x19, 0xb3, 0x86, 0x0e, 0x7b, 0xb1, 0x06, 0x59, 0xad, - 0xa9, 0x57, 0xe7, 0xd5, 0x98, 0x0e, 0xd6, 0x89, 0xda, 0x5f, 0xce, 0xf3, 0xdd, 0xce, 0xb8, 0xed, - 0x67, 0x60, 0xa4, 0xed, 0x37, 0x97, 0xaa, 0x15, 0x2c, 0x66, 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, - 0x25, 0x1c, 0xbd, 0x06, 0x40, 0x1e, 0x44, 0x24, 0xf0, 0x9c, 0x96, 0x32, 0x1a, 0x51, 0x66, 0x92, - 0x15, 0x7f, 0xcd, 0x8f, 0xee, 0x84, 0xe4, 0xbb, 0x96, 0x15, 0x0a, 0xd6, 0xd0, 0xd1, 0x35, 0x80, - 0x76, 0xe0, 0xef, 0xb9, 0x4d, 0xe6, 0x5e, 0x97, 0x37, 0x4d, 0x2a, 0x6a, 0x0a, 0x82, 0x35, 0x2c, - 0xf4, 0x1a, 0x8c, 0x77, 0xbc, 0x90, 0x73, 0x28, 0xce, 0x86, 0x88, 0x4e, 0x58, 0x8c, 0xad, 0x1b, - 0xee, 0xe8, 0x40, 0x6c, 0xe2, 0xa2, 0x05, 0x18, 0x8e, 0x1c, 0x66, 0x13, 0x31, 0x94, 0x6d, 0xdb, - 0xb8, 0x4e, 0x31, 0xf4, 0xa0, 0xb2, 0xb4, 0x02, 0x16, 0x15, 0xd1, 0xdb, 0xd2, 0x57, 0x81, 0x9f, - 0xf5, 0xc2, 0xa8, 0x78, 0xb0, 0x7b, 0x41, 0xf3, 0x54, 0x10, 0xc6, 0xca, 0x06, 0x2d, 0xfb, 0xeb, - 0x25, 0x80, 0x98, 0x1d, 0x47, 0xef, 0x77, 0x9d, 0x47, 0xcf, 0xf5, 0x66, 0xe0, 0x4f, 0xee, 0x30, - 0x42, 0xdf, 0x6f, 0xc1, 0xa8, 0xd3, 0x6a, 0xf9, 0x0d, 0x27, 0x62, 0xa3, 0x9c, 0xeb, 0x7d, 0x1e, - 0x8a, 0xf6, 0x17, 0xe2, 0x1a, 0xbc, 0x0b, 0x2f, 0xca, 0x85, 0xa7, 0x41, 0xfa, 0xf6, 0x42, 0x6f, - 0x18, 0x7d, 0x4a, 0x4a, 0x69, 0x7c, 0x79, 0xcc, 0x26, 
0xa5, 0xb4, 0x12, 0x3b, 0xfa, 0x35, 0x01, - 0x0d, 0xdd, 0x31, 0x02, 0xcf, 0x15, 0xb2, 0x63, 0x30, 0x18, 0x5c, 0x69, 0xbf, 0x98, 0x73, 0xa8, - 0xa6, 0x3b, 0x57, 0x0d, 0x65, 0x07, 0x2a, 0xd1, 0xc4, 0x9f, 0x3e, 0x8e, 0x55, 0xef, 0xc2, 0x64, - 0xd3, 0xbc, 0xdb, 0xc5, 0x6a, 0x7a, 0x3a, 0x8b, 0x6e, 0x82, 0x15, 0x88, 0x6f, 0xf3, 0x04, 0x00, - 0x27, 0x09, 0xa3, 0x1a, 0x77, 0x73, 0xab, 0x7a, 0x9b, 0xbe, 0x30, 0x4e, 0xb7, 0x33, 0xe7, 0x72, - 0x3f, 0x8c, 0xc8, 0x2e, 0xc5, 0x8c, 0x2f, 0xed, 0x35, 0x51, 0x17, 0x2b, 0x2a, 0xe8, 0x4d, 0x18, - 0x66, 0x7e, 0xb2, 0xe1, 0x4c, 0x31, 0x5b, 0x99, 0x68, 0x86, 0x78, 0x88, 0x37, 0x15, 0xfb, 0x1b, - 0x62, 0x41, 0x01, 0xdd, 0x90, 0x71, 0x60, 0xc2, 0xaa, 0x77, 0x27, 0x24, 0x2c, 0x0e, 0x4c, 0x69, - 0xf1, 0xe3, 0x71, 0x88, 0x17, 0x5e, 0x9e, 0x1a, 0x3e, 0xde, 0xa8, 0x49, 0x99, 0x23, 0xf1, 0x5f, - 0x46, 0xa5, 0x9f, 0x81, 0xec, 0xee, 0x99, 0x91, 0xeb, 0xe3, 0xe1, 0xbc, 0x6b, 0x92, 0xc0, 0x49, - 0x9a, 0x94, 0xd1, 0xe4, 0x3b, 0x57, 0x98, 0xb7, 0xf7, 0xdb, 0xff, 0x5c, 0xbe, 0x66, 0x97, 0x0c, - 0x2f, 0xc1, 0xa2, 0xfe, 0xa9, 0xde, 0xfa, 0xb3, 0x1e, 0x4c, 0x25, 0xb7, 0xe8, 0x23, 0xe5, 0x32, - 0xfe, 0xa0, 0x00, 0x13, 0xe6, 0x92, 0x42, 0xf3, 0x50, 0x12, 0x44, 0x54, 0x50, 0x52, 0xb5, 0x4b, - 0x56, 0x25, 0x00, 0xc7, 0x38, 0x2c, 0x16, 0x2d, 0xab, 0xae, 0x99, 0x25, 0xc6, 0xb1, 0x68, 0x15, - 0x04, 0x6b, 0x58, 0x54, 0x5e, 0xda, 0xf0, 0xfd, 0x48, 0x5d, 0x2a, 0x6a, 0xdd, 0x2d, 0xb2, 0x52, - 0x2c, 0xa0, 0xf4, 0x32, 0xd9, 0x21, 0x81, 0x47, 0x5a, 0x66, 0xac, 0x33, 0x75, 0x99, 0xdc, 0xd4, - 0x81, 0xd8, 0xc4, 0xa5, 0xb7, 0xa4, 0x1f, 0xb2, 0x85, 0x2c, 0xa4, 0xb2, 0xd8, 0xcc, 0xb3, 0xce, - 0x3d, 0xce, 0x25, 0x1c, 0x7d, 0x1e, 0x1e, 0x53, 0x0e, 0xe2, 0x98, 0x2b, 0xaa, 0x65, 0x8b, 0xc3, - 0x86, 0x12, 0xe5, 0xb1, 0xa5, 0x74, 0x34, 0x9c, 0x55, 0x1f, 0xbd, 0x01, 0x13, 0x82, 0x73, 0x97, - 0x14, 0x47, 0x4c, 0xdb, 0x89, 0x9b, 0x06, 0x14, 0x27, 0xb0, 0x65, 0xb4, 0x36, 0xc6, 0x3c, 0x4b, - 0x0a, 0xc5, 0xee, 0x68, 0x6d, 0x3a, 0x1c, 0x77, 0xd5, 0x40, 0x0b, 0x30, 0xc9, 0x59, 0x2b, 0xd7, - 0xdb, 0xe2, 0x73, 0x22, 0xbc, 0x4f, 0xd4, 0x96, 0xba, 0x6d, 0x82, 0x71, 0x12, 0x1f, 0xbd, 0x0a, - 0x63, 0x4e, 0xd0, 0xd8, 0x76, 0x23, 0xd2, 0x88, 0x3a, 0x01, 0x77, 0x4b, 0xd1, 0x8c, 0x4f, 0x16, - 0x34, 0x18, 0x36, 0x30, 0xed, 0xf7, 0xe1, 0x4c, 0x8a, 0xe3, 0x1a, 0x5d, 0x38, 0x4e, 0xdb, 0x95, - 0xdf, 0x94, 0x30, 0xd8, 0x5c, 0xa8, 0x55, 0xe5, 0xd7, 0x68, 0x58, 0x74, 0x75, 0x32, 0x07, 0x37, - 0x2d, 0x09, 0x85, 0x5a, 0x9d, 0x2b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x7f, 0xe5, 0x60, 0x32, 0x45, - 0xf9, 0xce, 0x12, 0x21, 0x24, 0x64, 0x8f, 0x38, 0xef, 0x81, 0x19, 0xfc, 0x2f, 0x77, 0x8c, 0xe0, - 0x7f, 0xf9, 0x7e, 0xc1, 0xff, 0x0a, 0x1f, 0x24, 0xf8, 0x9f, 0x39, 0x62, 0x43, 0x03, 0x8d, 0x58, - 0x4a, 0xc0, 0xc0, 0xe1, 0x63, 0x06, 0x0c, 0x34, 0x06, 0x7d, 0x64, 0x80, 0x41, 0xff, 0xe1, 0x1c, - 0x4c, 0x25, 0x8d, 0xe4, 0x4e, 0x41, 0x1d, 0xfb, 0xa6, 0xa1, 0x8e, 0x4d, 0x4f, 0x2b, 0x92, 0x34, - 0xdd, 0xcb, 0x52, 0xcd, 0xe2, 0x84, 0x6a, 0xf6, 0x93, 0x03, 0x51, 0xeb, 0xad, 0xa6, 0xfd, 0x3b, - 0x39, 0x38, 0x97, 0xac, 0xb2, 0xd4, 0x72, 0xdc, 0xdd, 0x53, 0x18, 0x9b, 0xdb, 0xc6, 0xd8, 0x3c, - 0x3f, 0xc8, 0xd7, 0xb0, 0xae, 0x65, 0x0e, 0xd0, 0xbd, 0xc4, 0x00, 0xcd, 0x0f, 0x4e, 0xb2, 0xf7, - 0x28, 0x7d, 0x23, 0x0f, 0x97, 0x52, 0xeb, 0xc5, 0xda, 0xcc, 0x15, 0x43, 0x9b, 0x79, 0x2d, 0xa1, - 0xcd, 0xb4, 0x7b, 0xd7, 0x3e, 0x19, 0xf5, 0xa6, 0xf0, 0x28, 0x64, 0x01, 0xe2, 0x1e, 0x52, 0xb5, - 0x69, 0x78, 0x14, 0x2a, 0x42, 0xd8, 0xa4, 0xfb, 0xcd, 0xa4, 0xd2, 0xfc, 0x57, 0x16, 0x5c, 0x48, - 0x9d, 0x9b, 0x53, 0x50, 0x61, 0xad, 0x99, 0x2a, 0xac, 0x67, 0x06, 0x5e, 0xad, 
0x19, 0x3a, 0xad, - 0xdf, 0x2c, 0x64, 0x7c, 0x0b, 0x13, 0xd0, 0x6f, 0xc3, 0xa8, 0xd3, 0x68, 0x90, 0x30, 0x5c, 0xf5, - 0x9b, 0x2a, 0x60, 0xda, 0xf3, 0x4c, 0xce, 0x8a, 0x8b, 0x8f, 0x0e, 0xca, 0xb3, 0x49, 0x12, 0x31, - 0x18, 0xeb, 0x14, 0xcc, 0x18, 0x8f, 0xb9, 0x13, 0x8d, 0xf1, 0x78, 0x0d, 0x60, 0x4f, 0x71, 0xeb, - 0x49, 0x21, 0x5f, 0xe3, 0xe3, 0x35, 0x2c, 0xf4, 0x05, 0x28, 0x86, 0xe2, 0x1a, 0x17, 0x4b, 0xf1, - 0xc5, 0x01, 0xe7, 0xca, 0xd9, 0x20, 0x2d, 0xd3, 0x75, 0x5d, 0xe9, 0x43, 0x14, 0x49, 0xf4, 0x1d, - 0x30, 0x15, 0xf2, 0xc8, 0x28, 0x4b, 0x2d, 0x27, 0x64, 0x7e, 0x10, 0x62, 0x15, 0x32, 0x7f, 0xf4, - 0x7a, 0x02, 0x86, 0xbb, 0xb0, 0xd1, 0x8a, 0xfc, 0x28, 0x16, 0xc6, 0x85, 0x2f, 0xcc, 0x2b, 0xf1, - 0x07, 0x89, 0x34, 0x4c, 0x67, 0x93, 0xc3, 0xcf, 0x06, 0x5e, 0xab, 0x89, 0xbe, 0x00, 0x40, 0x97, - 0x8f, 0xd0, 0x25, 0x8c, 0x64, 0x1f, 0x9e, 0xf4, 0x54, 0x69, 0xa6, 0x5a, 0x7e, 0x32, 0x5f, 0xbe, - 0x8a, 0x22, 0x82, 0x35, 0x82, 0xf6, 0x0f, 0x17, 0xe0, 0xf1, 0x1e, 0x67, 0x24, 0x5a, 0x30, 0x9f, - 0x40, 0x9f, 0x4d, 0x0a, 0xd7, 0xb3, 0xa9, 0x95, 0x0d, 0x69, 0x3b, 0xb1, 0x14, 0x73, 0x1f, 0x78, - 0x29, 0xfe, 0x80, 0xa5, 0xa9, 0x3d, 0xb8, 0x31, 0xdf, 0x67, 0x8f, 0x79, 0xf6, 0x9f, 0xa0, 0x1e, - 0x64, 0x33, 0x45, 0x99, 0x70, 0x6d, 0xe0, 0xee, 0x0c, 0xac, 0x5d, 0x38, 0x5d, 0xe5, 0xef, 0x97, - 0x2d, 0x78, 0x32, 0xb5, 0xbf, 0x86, 0xc9, 0xc6, 0x3c, 0x94, 0x1a, 0xb4, 0x50, 0x73, 0xdd, 0x8a, - 0x7d, 0x5a, 0x25, 0x00, 0xc7, 0x38, 0x86, 0x65, 0x46, 0xae, 0xaf, 0x65, 0xc6, 0x3f, 0xb7, 0xa0, - 0x6b, 0x7f, 0x9c, 0xc2, 0x41, 0x5d, 0x35, 0x0f, 0xea, 0x8f, 0x0f, 0x32, 0x97, 0x19, 0x67, 0xf4, - 0x7f, 0x9e, 0x84, 0xf3, 0x19, 0xbe, 0x1a, 0x7b, 0x30, 0xbd, 0xd5, 0x20, 0xa6, 0x53, 0x9c, 0xf8, - 0x98, 0x54, 0xff, 0xc1, 0x9e, 0x1e, 0x74, 0x2c, 0x3d, 0xcf, 0x74, 0x17, 0x0a, 0xee, 0x6e, 0x02, - 0x7d, 0xd9, 0x82, 0xb3, 0xce, 0xfd, 0xb0, 0x2b, 0x09, 0xa3, 0x58, 0x33, 0x2f, 0xa5, 0x2a, 0x41, - 0xfa, 0x24, 0x6d, 0xe4, 0xf9, 0x8a, 0xd2, 0xb0, 0x70, 0x6a, 0x5b, 0x08, 0x8b, 0x10, 0x9a, 0x94, - 0x9d, 0xef, 0xe1, 0xb6, 0x99, 0xe6, 0x54, 0xc3, 0x8f, 0x6c, 0x09, 0xc1, 0x8a, 0x0e, 0xba, 0x0b, - 0xa5, 0x2d, 0xe9, 0xe9, 0x26, 0xae, 0x84, 0xd4, 0x3b, 0x36, 0xd5, 0x1d, 0x8e, 0x3f, 0x4b, 0x2a, - 0x10, 0x8e, 0x49, 0xa1, 0x37, 0x20, 0xef, 0x6d, 0x86, 0xbd, 0x12, 0xfd, 0x24, 0x2c, 0x99, 0xb8, - 0x4b, 0xf4, 0xda, 0x4a, 0x1d, 0xd3, 0x8a, 0xe8, 0x06, 0xe4, 0x83, 0x8d, 0xa6, 0xd0, 0xdb, 0xa5, - 0x9e, 0xdc, 0x78, 0xb1, 0x92, 0xbe, 0x48, 0x38, 0x25, 0xbc, 0x58, 0xc1, 0x94, 0x04, 0xaa, 0xc1, - 0x10, 0x73, 0x6b, 0x10, 0xb7, 0x40, 0x2a, 0xbf, 0xdb, 0xc3, 0x3d, 0x88, 0xfb, 0x4d, 0x33, 0x04, - 0xcc, 0x09, 0xa1, 0x75, 0x18, 0x6e, 0xb0, 0xa4, 0x30, 0x22, 0x6a, 0xf3, 0xa7, 0x52, 0x35, 0x74, - 0x3d, 0xb2, 0xe5, 0x08, 0x85, 0x15, 0xc3, 0xc0, 0x82, 0x16, 0xa3, 0x4a, 0xda, 0xdb, 0x9b, 0x21, - 0x93, 0xf0, 0xb3, 0xa8, 0xf6, 0x48, 0x02, 0x25, 0xa8, 0x32, 0x0c, 0x2c, 0x68, 0xa1, 0xcf, 0x40, - 0x6e, 0xb3, 0x21, 0xbc, 0x1e, 0x52, 0x55, 0x75, 0xa6, 0x57, 0xfb, 0xe2, 0xf0, 0xe1, 0x41, 0x39, - 0xb7, 0xb2, 0x84, 0x73, 0x9b, 0x0d, 0xb4, 0x06, 0x23, 0x9b, 0xdc, 0x0f, 0x56, 0x68, 0xe3, 0x9e, - 0x4e, 0x77, 0xd1, 0xed, 0x72, 0x95, 0xe5, 0xd6, 0xfa, 0x02, 0x80, 0x25, 0x11, 0x16, 0x87, 0x52, - 0xf9, 0xf3, 0x8a, 0x80, 0xcc, 0x73, 0xc7, 0xf3, 0xc1, 0xe6, 0xb7, 0x72, 0xec, 0x15, 0x8c, 0x35, - 0x8a, 0xe8, 0x4b, 0x50, 0x72, 0x64, 0xfa, 0x3f, 0x11, 0xb0, 0xe2, 0xc5, 0xd4, 0x8d, 0xd9, 0x3b, - 0x33, 0x22, 0x5f, 0xd5, 0x0a, 0x09, 0xc7, 0x44, 0xd1, 0x0e, 0x8c, 0xef, 0x85, 0xed, 0x6d, 0x22, - 0x37, 0x32, 0x8b, 0x5f, 0x91, 0x71, 0x71, 0xdd, 0x15, 0x88, 0x6e, 0x10, 0x75, 0x9c, 0x56, 0xd7, - 0xd9, 
0xc3, 0xde, 0xb2, 0xef, 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x0e, 0xff, 0x7b, 0x1d, 0x7f, 0x63, - 0x3f, 0x22, 0x22, 0x82, 0x73, 0xea, 0xf0, 0xbf, 0xc5, 0x51, 0xba, 0x87, 0x5f, 0x00, 0xb0, 0x24, - 0x42, 0xb7, 0xba, 0x23, 0x53, 0x6b, 0xb2, 0xc8, 0xcd, 0x19, 0x5b, 0x3d, 0x35, 0xff, 0xa6, 0x36, - 0x28, 0xec, 0x8c, 0x8c, 0x49, 0xb1, 0xb3, 0xb1, 0xbd, 0xed, 0x47, 0xbe, 0x97, 0x38, 0x97, 0xa7, - 0xb3, 0xcf, 0xc6, 0x5a, 0x0a, 0x7e, 0xf7, 0xd9, 0x98, 0x86, 0x85, 0x53, 0xdb, 0x42, 0x4d, 0x98, - 0x68, 0xfb, 0x41, 0x74, 0xdf, 0x0f, 0xe4, 0xfa, 0x42, 0x3d, 0xb4, 0x09, 0x06, 0xa6, 0x68, 0x91, - 0x45, 0x14, 0x37, 0x21, 0x38, 0x41, 0x13, 0x7d, 0x0e, 0x46, 0xc2, 0x86, 0xd3, 0x22, 0xd5, 0xdb, - 0x33, 0x67, 0xb2, 0x2f, 0x9d, 0x3a, 0x47, 0xc9, 0x58, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, - 0xd0, 0x0a, 0x0c, 0xb1, 0xb4, 0x00, 0x2c, 0xf8, 0x74, 0x46, 0x60, 0xa4, 0x2e, 0xbb, 0x52, 0x7e, - 0x36, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0x98, 0x6a, 0x3f, 0x9c, 0x39, 0x97, 0xbd, 0x07, - 0x04, 0x2f, 0x7e, 0xbb, 0xde, 0x6b, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27, 0x33, 0x3d, 0x4d, - 0xcf, 0xf7, 0x30, 0x63, 0xc9, 0x3c, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a, 0xc2, 0xfe, 0xbd, - 0x91, 0x6e, 0x4e, 0x85, 0x89, 0x61, 0x7f, 0xd1, 0xea, 0x7a, 0xa1, 0xfb, 0xf4, 0xa0, 0x5a, 0xa1, - 0x13, 0xe4, 0x51, 0xbf, 0x6c, 0xc1, 0xf9, 0x76, 0xea, 0x87, 0x88, 0x6b, 0x7f, 0x30, 0xe5, 0x12, - 0xff, 0x74, 0x15, 0x20, 0x3e, 0x1d, 0x8e, 0x33, 0x5a, 0x4a, 0xca, 0x01, 0xf9, 0x0f, 0x2c, 0x07, - 0xac, 0x42, 0x91, 0xb1, 0x96, 0x7d, 0x92, 0xa4, 0x25, 0xc5, 0x21, 0xc6, 0x40, 0x2c, 0x89, 0x8a, - 0x58, 0x91, 0x40, 0x3f, 0x68, 0xc1, 0xc5, 0x64, 0xd7, 0x31, 0x61, 0x60, 0x11, 0x4e, 0x9d, 0x4b, - 0x80, 0x2b, 0xe2, 0xfb, 0x2f, 0xd6, 0x7a, 0x21, 0x1f, 0xf5, 0x43, 0xc0, 0xbd, 0x1b, 0x43, 0x95, - 0x14, 0x11, 0x74, 0xd8, 0x54, 0xbb, 0x0f, 0x20, 0x86, 0xbe, 0x04, 0x63, 0xbb, 0x7e, 0xc7, 0x8b, - 0x84, 0xd5, 0x8b, 0xf0, 0x53, 0x64, 0xcf, 0xcc, 0xab, 0x5a, 0x39, 0x36, 0xb0, 0x12, 0xc2, 0x6b, - 0xf1, 0xa1, 0x85, 0xd7, 0x77, 0x12, 0xa9, 0xb0, 0x4b, 0xd9, 0x61, 0xfb, 0x84, 0x9c, 0x7f, 0x8c, - 0x84, 0xd8, 0xa7, 0x2b, 0x11, 0xfd, 0x8c, 0x95, 0xc2, 0xca, 0x73, 0x19, 0xf9, 0x75, 0x53, 0x46, - 0xbe, 0x92, 0x94, 0x91, 0xbb, 0x54, 0xae, 0x86, 0x78, 0x3c, 0x78, 0xec, 0xe7, 0x41, 0x83, 0xa9, - 0xd9, 0x2d, 0xb8, 0xdc, 0xef, 0x5a, 0x62, 0xe6, 0x4f, 0x4d, 0xf5, 0xc0, 0x16, 0x9b, 0x3f, 0x35, - 0xab, 0x15, 0xcc, 0x20, 0x83, 0x46, 0xdb, 0xb0, 0xff, 0x9b, 0x05, 0xf9, 0x9a, 0xdf, 0x3c, 0x05, - 0x15, 0xf2, 0x67, 0x0d, 0x15, 0xf2, 0xe3, 0x19, 0x29, 0xca, 0x33, 0x15, 0xc6, 0xcb, 0x09, 0x85, - 0xf1, 0xc5, 0x2c, 0x02, 0xbd, 0xd5, 0xc3, 0x3f, 0x99, 0x07, 0x3d, 0xa1, 0x3a, 0xfa, 0xcd, 0x87, - 0xb1, 0x3d, 0xce, 0xf7, 0xca, 0xb1, 0x2e, 0x28, 0x33, 0xab, 0x29, 0xe9, 0x7a, 0xf7, 0x67, 0xcc, - 0x04, 0xf9, 0x1e, 0x71, 0xb7, 0xb6, 0x23, 0xd2, 0x4c, 0x7e, 0xce, 0xe9, 0x99, 0x20, 0xff, 0x17, - 0x0b, 0x26, 0x13, 0xad, 0xa3, 0x16, 0x8c, 0xb7, 0x74, 0xfd, 0x9f, 0x58, 0xa7, 0x0f, 0xa5, 0x3a, - 0x14, 0x26, 0x9c, 0x5a, 0x11, 0x36, 0x89, 0xa3, 0x39, 0x00, 0xf5, 0x3e, 0x27, 0xf5, 0x5e, 0x8c, - 0xeb, 0x57, 0x0f, 0x78, 0x21, 0xd6, 0x30, 0xd0, 0xcb, 0x30, 0x1a, 0xf9, 0x6d, 0xbf, 0xe5, 0x6f, - 0xed, 0xdf, 0x24, 0x32, 0xbe, 0x8b, 0x32, 0xcc, 0x5a, 0x8f, 0x41, 0x58, 0xc7, 0xb3, 0x7f, 0x3a, - 0x0f, 0xc9, 0x24, 0xfc, 0xdf, 0x5a, 0x93, 0x1f, 0xcd, 0x35, 0xf9, 0x0d, 0x0b, 0xa6, 0x68, 0xeb, - 0xcc, 0x48, 0x44, 0x5e, 0xb6, 0x2a, 0x07, 0x8d, 0xd5, 0x23, 0x07, 0xcd, 0x15, 0x7a, 0x76, 0x35, - 0xfd, 0x4e, 0x24, 0xf4, 0x66, 0xda, 0xe1, 0x44, 0x4b, 0xb1, 0x80, 0x0a, 0x3c, 0x12, 0x04, 0xc2, - 0xf3, 0x49, 0xc7, 0x23, 0x41, 
0x80, 0x05, 0x54, 0xa6, 0xa8, 0x29, 0x64, 0xa4, 0xa8, 0x61, 0xd1, - 0xea, 0x84, 0x39, 0x81, 0x60, 0x7b, 0xb4, 0x68, 0x75, 0xd2, 0xce, 0x20, 0xc6, 0xb1, 0x7f, 0x3e, - 0x0f, 0x63, 0x35, 0xbf, 0x19, 0xbf, 0x90, 0xbd, 0x64, 0xbc, 0x90, 0x5d, 0x4e, 0xbc, 0x90, 0x4d, - 0xe9, 0xb8, 0xdf, 0x7a, 0x0f, 0xfb, 0xb0, 0xde, 0xc3, 0xfe, 0x99, 0xc5, 0x66, 0xad, 0xb2, 0x56, - 0x17, 0x29, 0x72, 0x5f, 0x80, 0x51, 0x76, 0x20, 0x31, 0x57, 0x3b, 0xf9, 0x6c, 0xc4, 0xa2, 0xcf, - 0xaf, 0xc5, 0xc5, 0x58, 0xc7, 0x41, 0x57, 0xa1, 0x18, 0x12, 0x27, 0x68, 0x6c, 0xab, 0x33, 0x4e, - 0x3c, 0xaa, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0x50, 0x5a, 0x3e, 0x3b, 0xd9, 0xab, 0xde, - 0x1f, 0xbe, 0x45, 0xb2, 0xa3, 0xa3, 0xd9, 0xf7, 0x00, 0x75, 0xe3, 0x0f, 0x10, 0x12, 0xa9, 0x6c, - 0x86, 0x44, 0x2a, 0x75, 0x85, 0x43, 0xfa, 0x13, 0x0b, 0x26, 0x6a, 0x7e, 0x93, 0x6e, 0xdd, 0x6f, - 0xa6, 0x7d, 0xaa, 0x47, 0x89, 0x1c, 0xee, 0x11, 0x25, 0xf2, 0xef, 0x5a, 0x30, 0x52, 0xf3, 0x9b, - 0xa7, 0xa0, 0x6d, 0x7f, 0xdd, 0xd4, 0xb6, 0x3f, 0x96, 0xb1, 0x24, 0x32, 0x14, 0xec, 0xbf, 0x98, - 0x87, 0x71, 0xda, 0x4f, 0x7f, 0x4b, 0xce, 0x92, 0x31, 0x22, 0xd6, 0x00, 0x23, 0x42, 0xd9, 0x5c, - 0xbf, 0xd5, 0xf2, 0xef, 0x27, 0x67, 0x6c, 0x85, 0x95, 0x62, 0x01, 0x45, 0xcf, 0x41, 0xb1, 0x1d, - 0x90, 0x3d, 0xd7, 0x17, 0xfc, 0xa3, 0xf6, 0x76, 0x51, 0x13, 0xe5, 0x58, 0x61, 0x50, 0xb9, 0x2b, - 0x74, 0xbd, 0x06, 0x91, 0x99, 0xa6, 0x0b, 0x2c, 0x19, 0x15, 0x0f, 0xff, 0xac, 0x95, 0x63, 0x03, - 0x0b, 0xdd, 0x83, 0x12, 0xfb, 0xcf, 0x4e, 0x94, 0xe3, 0x27, 0xcf, 0x11, 0x39, 0x17, 0x04, 0x01, - 0x1c, 0xd3, 0x42, 0xd7, 0x00, 0x22, 0x19, 0x22, 0x38, 0x14, 0x91, 0x6d, 0x14, 0xaf, 0xad, 0x82, - 0x07, 0x87, 0x58, 0xc3, 0x42, 0xcf, 0x42, 0x29, 0x72, 0xdc, 0xd6, 0x2d, 0xd7, 0x23, 0x21, 0x53, - 0x39, 0xe7, 0x65, 0x4a, 0x05, 0x51, 0x88, 0x63, 0x38, 0xe5, 0x75, 0x98, 0xdb, 0x37, 0x4f, 0xbd, - 0x55, 0x64, 0xd8, 0x8c, 0xd7, 0xb9, 0xa5, 0x4a, 0xb1, 0x86, 0x61, 0xbf, 0x0a, 0xe7, 0x6a, 0x7e, - 0xb3, 0xe6, 0x07, 0xd1, 0x8a, 0x1f, 0xdc, 0x77, 0x82, 0xa6, 0x9c, 0xbf, 0xb2, 0x8c, 0xee, 0x4f, - 0xcf, 0x9e, 0x21, 0xbe, 0x33, 0x8d, 0xb8, 0xfd, 0x2f, 0x32, 0x6e, 0xe7, 0x98, 0xae, 0x1c, 0x0d, - 0x76, 0xef, 0xaa, 0x2c, 0x7b, 0xd7, 0x9d, 0x88, 0xa0, 0xdb, 0x2c, 0x33, 0x57, 0x7c, 0x05, 0x89, - 0xea, 0xcf, 0x68, 0x99, 0xb9, 0x62, 0x60, 0xea, 0x9d, 0x65, 0xd6, 0xb7, 0x7f, 0x2d, 0xcf, 0x4e, - 0xa3, 0x44, 0xd2, 0x39, 0xf4, 0x45, 0x98, 0x08, 0xc9, 0x2d, 0xd7, 0xeb, 0x3c, 0x90, 0x42, 0x78, - 0x0f, 0x67, 0x9c, 0xfa, 0xb2, 0x8e, 0xc9, 0x55, 0x79, 0x66, 0x19, 0x4e, 0x50, 0xa3, 0xf3, 0x14, - 0x74, 0xbc, 0x85, 0xf0, 0x4e, 0x48, 0x02, 0x91, 0xf4, 0x8c, 0xcd, 0x13, 0x96, 0x85, 0x38, 0x86, - 0xd3, 0x75, 0xc9, 0xfe, 0xac, 0xf9, 0x1e, 0xf6, 0xfd, 0x48, 0xae, 0x64, 0x96, 0x36, 0x47, 0x2b, - 0xc7, 0x06, 0x16, 0x5a, 0x01, 0x14, 0x76, 0xda, 0xed, 0x16, 0x7b, 0xce, 0x77, 0x5a, 0xd7, 0x03, - 0xbf, 0xd3, 0xe6, 0x6f, 0x9d, 0xf9, 0xc5, 0xf3, 0xf4, 0x0a, 0xab, 0x77, 0x41, 0x71, 0x4a, 0x0d, - 0x7a, 0xfa, 0x6c, 0x86, 0xec, 0x37, 0x5b, 0xdd, 0x79, 0xa1, 0x5e, 0xaf, 0xb3, 0x22, 0x2c, 0x61, - 0x74, 0x31, 0xb1, 0xe6, 0x39, 0xe6, 0x70, 0xbc, 0x98, 0xb0, 0x2a, 0xc5, 0x1a, 0x06, 0x5a, 0x86, - 0x91, 0x70, 0x3f, 0x6c, 0x44, 0x22, 0x0e, 0x53, 0x46, 0xfa, 0xca, 0x3a, 0x43, 0xd1, 0x52, 0x2a, - 0xf0, 0x2a, 0x58, 0xd6, 0xb5, 0xbf, 0x87, 0x5d, 0x86, 0x2c, 0x45, 0x56, 0xd4, 0x09, 0x08, 0xda, - 0x85, 0xf1, 0x36, 0x9b, 0x72, 0x11, 0xc0, 0x59, 0xcc, 0xdb, 0x4b, 0x03, 0x4a, 0xb5, 0xf7, 0xe9, - 0x41, 0xa3, 0xb4, 0x4e, 0x4c, 0x5c, 0xa8, 0xe9, 0xe4, 0xb0, 0x49, 0xdd, 0xfe, 0xd7, 0xd3, 0xec, - 0xcc, 0xad, 0x73, 0x51, 0x75, 0x44, 0x18, 0x14, 0x0b, 
0xbe, 0x7c, 0x36, 0x5b, 0x67, 0x12, 0x7f, - 0x91, 0x30, 0x4a, 0xc6, 0xb2, 0x2e, 0x7a, 0x8b, 0xbd, 0x4d, 0xf3, 0x83, 0xae, 0x5f, 0xa6, 0x62, - 0x8e, 0x65, 0x3c, 0x43, 0x8b, 0x8a, 0x58, 0x23, 0x82, 0x6e, 0xc1, 0xb8, 0xc8, 0xa8, 0x24, 0x94, - 0x62, 0x79, 0x43, 0xe9, 0x31, 0x8e, 0x75, 0xe0, 0x51, 0xb2, 0x00, 0x9b, 0x95, 0xd1, 0x16, 0x5c, - 0xd4, 0xd2, 0x0b, 0x5e, 0x0f, 0x1c, 0xf6, 0x5e, 0xe9, 0xb2, 0x4d, 0xa4, 0x9d, 0x9b, 0x4f, 0x1e, - 0x1e, 0x94, 0x2f, 0xae, 0xf7, 0x42, 0xc4, 0xbd, 0xe9, 0xa0, 0xdb, 0x70, 0x8e, 0xfb, 0xed, 0x55, - 0x88, 0xd3, 0x6c, 0xb9, 0x9e, 0x3a, 0x98, 0xf9, 0x3a, 0xbc, 0x70, 0x78, 0x50, 0x3e, 0xb7, 0x90, - 0x86, 0x80, 0xd3, 0xeb, 0xa1, 0xd7, 0xa1, 0xd4, 0xf4, 0x42, 0x31, 0x06, 0xc3, 0x46, 0xe6, 0xcc, - 0x52, 0x65, 0xad, 0xae, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a, 0x68, 0x8b, 0x2b, 0xc6, 0x94, 0x1c, - 0x3a, 0x92, 0x9d, 0x25, 0x5d, 0x2c, 0x09, 0xc3, 0x73, 0x87, 0x6b, 0x84, 0x95, 0xe5, 0xab, 0xe1, - 0xd4, 0x63, 0x10, 0x46, 0x6f, 0x02, 0xa2, 0x8c, 0x9a, 0xdb, 0x20, 0x0b, 0x0d, 0x16, 0x47, 0x9b, - 0xe9, 0x11, 0x8b, 0x86, 0xa7, 0x04, 0xaa, 0x77, 0x61, 0xe0, 0x94, 0x5a, 0xe8, 0x06, 0x3d, 0xc8, - 0xf4, 0x52, 0x61, 0xc1, 0x2b, 0x99, 0xfb, 0x99, 0x0a, 0x69, 0x07, 0xa4, 0xe1, 0x44, 0xa4, 0x69, - 0x52, 0xc4, 0x89, 0x7a, 0xf4, 0x2e, 0x55, 0x29, 0x75, 0xc0, 0x0c, 0x96, 0xd1, 0x9d, 0x56, 0x87, - 0xca, 0xc5, 0xdb, 0x7e, 0x18, 0xad, 0x91, 0xe8, 0xbe, 0x1f, 0xec, 0x88, 0xd8, 0x64, 0x71, 0x98, - 0xcc, 0x18, 0x84, 0x75, 0x3c, 0xca, 0x07, 0xb3, 0xc7, 0xe1, 0x6a, 0x85, 0xbd, 0xd0, 0x15, 0xe3, - 0x7d, 0x72, 0x83, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xd5, 0xda, 0x12, 0x7b, 0x6d, 0x4b, 0xa0, 0x56, - 0x6b, 0x4b, 0x58, 0xc2, 0x11, 0xe9, 0xce, 0x4a, 0x3a, 0x91, 0xad, 0xd5, 0xec, 0xbe, 0x0e, 0x06, - 0x4c, 0x4c, 0xea, 0xc1, 0x94, 0xca, 0x87, 0xca, 0x83, 0xb6, 0x85, 0x33, 0x93, 0x6c, 0x91, 0x0c, - 0x1e, 0xf1, 0x4d, 0xe9, 0x89, 0xab, 0x09, 0x4a, 0xb8, 0x8b, 0xb6, 0x11, 0xbe, 0x64, 0xaa, 0x6f, - 0x4a, 0xa4, 0x79, 0x28, 0x85, 0x9d, 0x8d, 0xa6, 0xbf, 0xeb, 0xb8, 0x1e, 0x7b, 0x1c, 0xd3, 0x98, - 0xac, 0xba, 0x04, 0xe0, 0x18, 0x07, 0xad, 0x40, 0xd1, 0x91, 0x4a, 0x60, 0x94, 0x1d, 0xab, 0x40, - 0xa9, 0x7e, 0xb9, 0xfb, 0xae, 0x54, 0xfb, 0xaa, 0xba, 0xe8, 0x35, 0x18, 0x17, 0xde, 0x5a, 0x3c, - 0x82, 0x03, 0x7b, 0xbc, 0xd2, 0xcc, 0xf1, 0xeb, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x0b, 0x30, 0x41, - 0xa9, 0xc4, 0x07, 0xdb, 0xcc, 0xd9, 0x41, 0x4e, 0x44, 0x2d, 0xd5, 0x85, 0x5e, 0x19, 0x27, 0x88, - 0xa1, 0x26, 0x3c, 0xe1, 0x74, 0x22, 0x9f, 0x29, 0xd2, 0xcd, 0xf5, 0xbf, 0xee, 0xef, 0x10, 0x8f, - 0xbd, 0x61, 0x15, 0x17, 0x2f, 0x1f, 0x1e, 0x94, 0x9f, 0x58, 0xe8, 0x81, 0x87, 0x7b, 0x52, 0x41, - 0x77, 0x60, 0x34, 0xf2, 0x5b, 0xcc, 0x30, 0x9e, 0xb2, 0x12, 0xe7, 0xb3, 0xc3, 0xff, 0xac, 0x2b, - 0x34, 0x5d, 0x89, 0xa4, 0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe7, 0x7b, 0x8c, 0x05, 0x46, 0x25, 0xe1, - 0xcc, 0x63, 0xd9, 0x03, 0xa3, 0xe2, 0xa7, 0x9a, 0x5b, 0x50, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x1d, - 0xa6, 0xdb, 0x81, 0xeb, 0xb3, 0x85, 0xad, 0x1e, 0x31, 0x66, 0xcc, 0xec, 0x06, 0xb5, 0x24, 0x02, - 0xee, 0xae, 0x43, 0x85, 0x4c, 0x59, 0x38, 0x73, 0x81, 0xa7, 0xca, 0xe2, 0x8c, 0x37, 0x2f, 0xc3, - 0x0a, 0x8a, 0x56, 0xd9, 0xb9, 0xcc, 0xc5, 0xc1, 0x99, 0xd9, 0xec, 0x18, 0x0f, 0xba, 0xd8, 0xc8, - 0xf9, 0x25, 0xf5, 0x17, 0xc7, 0x14, 0xe8, 0xbd, 0x11, 0x6e, 0x3b, 0x01, 0xa9, 0x05, 0x7e, 0x83, - 0xf0, 0xce, 0x70, 0x9b, 0xfc, 0xc7, 0x79, 0xfc, 0x46, 0x7a, 0x6f, 0xd4, 0xd3, 0x10, 0x70, 0x7a, - 0x3d, 0xd4, 0xd4, 0x32, 0x44, 0x53, 0x36, 0x34, 0x9c, 0x79, 0xa2, 0x87, 0x99, 0x51, 0x82, 0x67, - 0x8d, 0xd7, 0xa2, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x1d, 0x30, 0x25, 0xc2, 
0x1d, 0xc5, 0xe3, - 0x7e, 0x31, 0xb6, 0x5f, 0xc4, 0x09, 0x18, 0xee, 0xc2, 0x9e, 0xfd, 0x76, 0x98, 0xee, 0xba, 0x71, - 0x8e, 0x15, 0x7c, 0xfc, 0x8f, 0x87, 0xa0, 0xa4, 0x94, 0xe9, 0x68, 0xde, 0x7c, 0x23, 0xb9, 0x90, - 0x7c, 0x23, 0x29, 0x52, 0x9e, 0x5e, 0x7f, 0x16, 0x59, 0x37, 0xcc, 0xea, 0x72, 0xd9, 0xa9, 0xbe, - 0x74, 0xae, 0xbc, 0xaf, 0x8b, 0x9e, 0xa6, 0x1b, 0xc9, 0x0f, 0xfc, 0xd8, 0x52, 0xe8, 0xa9, 0x6e, - 0x19, 0x30, 0xd3, 0x2e, 0x7a, 0x8a, 0x0a, 0x36, 0xcd, 0x6a, 0x2d, 0x99, 0x7a, 0xb2, 0x46, 0x0b, - 0x31, 0x87, 0x31, 0x01, 0x90, 0xb2, 0x47, 0x4c, 0x00, 0x1c, 0x79, 0x48, 0x01, 0x50, 0x12, 0xc0, - 0x31, 0x2d, 0xd4, 0x82, 0xe9, 0x86, 0x99, 0x35, 0x54, 0xb9, 0xe5, 0x3d, 0xd5, 0x37, 0x7f, 0x67, - 0x47, 0x4b, 0xd1, 0xb6, 0x94, 0xa4, 0x82, 0xbb, 0x09, 0xa3, 0xd7, 0xa0, 0xf8, 0x9e, 0x1f, 0xb2, - 0xc5, 0x24, 0x78, 0x04, 0xe9, 0xbe, 0x54, 0x7c, 0xeb, 0x76, 0x9d, 0x95, 0x1f, 0x1d, 0x94, 0x47, - 0x6b, 0x7e, 0x53, 0xfe, 0xc5, 0xaa, 0x02, 0x7a, 0x00, 0xe7, 0x8c, 0x93, 0x55, 0x75, 0x17, 0x06, - 0xef, 0xee, 0x45, 0xd1, 0xdc, 0xb9, 0x6a, 0x1a, 0x25, 0x9c, 0xde, 0x00, 0x3d, 0xae, 0x3c, 0x5f, - 0x64, 0xdc, 0x95, 0x7c, 0x08, 0x63, 0x37, 0x4a, 0xba, 0xf3, 0x7a, 0x02, 0x01, 0x77, 0xd7, 0xb1, - 0x7f, 0x85, 0xbf, 0x3d, 0x08, 0x0d, 0x25, 0x09, 0x3b, 0xad, 0xd3, 0x48, 0xe8, 0xb4, 0x6c, 0x28, - 0x4f, 0x1f, 0xfa, 0x7d, 0xeb, 0x37, 0x2c, 0xf6, 0xbe, 0xb5, 0x4e, 0x76, 0xdb, 0x2d, 0x2a, 0x27, - 0x3f, 0xfa, 0x8e, 0xbf, 0x05, 0xc5, 0x48, 0xb4, 0xd6, 0x2b, 0x07, 0x95, 0xd6, 0x29, 0xf6, 0xc6, - 0xa7, 0x38, 0x14, 0x59, 0x8a, 0x15, 0x19, 0xfb, 0x9f, 0xf0, 0x19, 0x90, 0x90, 0x53, 0x50, 0x64, - 0x55, 0x4c, 0x45, 0x56, 0xb9, 0xcf, 0x17, 0x64, 0x28, 0xb4, 0xfe, 0xb1, 0xd9, 0x6f, 0x26, 0x0c, - 0x7e, 0xd4, 0x1f, 0x56, 0xed, 0x1f, 0xb1, 0xe0, 0x6c, 0x9a, 0x25, 0x12, 0xe5, 0x2a, 0xb9, 0x28, - 0xaa, 0x1e, 0x9a, 0xd5, 0x08, 0xde, 0x15, 0xe5, 0x58, 0x61, 0x0c, 0x9c, 0xde, 0xe1, 0x78, 0xf1, - 0xdd, 0x6e, 0xc3, 0x78, 0x2d, 0x20, 0xda, 0x1d, 0xf0, 0x06, 0xf7, 0x83, 0xe3, 0xfd, 0x79, 0xee, - 0xd8, 0x3e, 0x70, 0xf6, 0xcf, 0xe6, 0xe0, 0x2c, 0x7f, 0x29, 0x5a, 0xd8, 0xf3, 0xdd, 0x66, 0xcd, - 0x6f, 0x8a, 0xd4, 0x1c, 0x6f, 0xc3, 0x58, 0x5b, 0xd3, 0x1f, 0xf4, 0x8a, 0x30, 0xa5, 0xeb, 0x19, - 0x62, 0x39, 0x4e, 0x2f, 0xc5, 0x06, 0x2d, 0xd4, 0x84, 0x31, 0xb2, 0xe7, 0x36, 0xd4, 0x73, 0x43, - 0xee, 0xd8, 0x77, 0x83, 0x6a, 0x65, 0x59, 0xa3, 0x83, 0x0d, 0xaa, 0x8f, 0x20, 0x5b, 0x9b, 0xfd, - 0xa3, 0x16, 0x3c, 0x96, 0x11, 0x8f, 0x8a, 0x36, 0x77, 0x9f, 0xbd, 0xc9, 0x89, 0xc4, 0x4f, 0xaa, - 0x39, 0xfe, 0x52, 0x87, 0x05, 0x14, 0x7d, 0x0e, 0x80, 0xbf, 0xb4, 0x51, 0xb1, 0xa6, 0x5f, 0xe0, - 0x1e, 0x23, 0xe6, 0x88, 0x16, 0x2b, 0x42, 0xd6, 0xc7, 0x1a, 0x2d, 0xfb, 0xa7, 0xf2, 0x30, 0xc4, - 0x5e, 0x76, 0xd0, 0x0a, 0x8c, 0x6c, 0xf3, 0x08, 0xcd, 0x83, 0x04, 0x83, 0x8e, 0xe5, 0x43, 0x5e, - 0x80, 0x65, 0x65, 0xb4, 0x0a, 0x67, 0x78, 0x84, 0xeb, 0x56, 0x85, 0xb4, 0x9c, 0x7d, 0xa9, 0x66, - 0xe0, 0xc9, 0x92, 0x54, 0xdc, 0x8b, 0x6a, 0x37, 0x0a, 0x4e, 0xab, 0x87, 0xde, 0x80, 0x09, 0xca, - 0x97, 0xf9, 0x9d, 0x48, 0x52, 0xe2, 0xb1, 0xad, 0x15, 0x23, 0xb8, 0x6e, 0x40, 0x71, 0x02, 0x9b, - 0x0a, 0x4c, 0xed, 0x2e, 0x85, 0xca, 0x50, 0x2c, 0x30, 0x99, 0x4a, 0x14, 0x13, 0x97, 0x99, 0x20, - 0x75, 0x98, 0xc1, 0xd5, 0xfa, 0x76, 0x40, 0xc2, 0x6d, 0xbf, 0xd5, 0x14, 0xb9, 0xb6, 0x63, 0x13, - 0xa4, 0x04, 0x1c, 0x77, 0xd5, 0xa0, 0x54, 0x36, 0x1d, 0xb7, 0xd5, 0x09, 0x48, 0x4c, 0x65, 0xd8, - 0xa4, 0xb2, 0x92, 0x80, 0xe3, 0xae, 0x1a, 0x74, 0x1d, 0x9d, 0x13, 0xc9, 0xaf, 0xa5, 0x37, 0xbe, - 0xb2, 0x2b, 0x1b, 0x91, 0x7e, 0x49, 0x3d, 0xc2, 0xd1, 0x08, 0xcb, 0x1b, 0x95, 0x3e, 0x5b, 0xd3, - 0x03, 
0x0a, 0x8f, 0x24, 0x49, 0xe5, 0x61, 0x52, 0x30, 0xff, 0x9e, 0x05, 0x67, 0x52, 0xec, 0x57, - 0xf9, 0x51, 0xb5, 0xe5, 0x86, 0x91, 0x4a, 0x08, 0xa3, 0x1d, 0x55, 0xbc, 0x1c, 0x2b, 0x0c, 0xba, - 0x1f, 0xf8, 0x61, 0x98, 0x3c, 0x00, 0x85, 0x7d, 0x98, 0x80, 0x1e, 0xef, 0x00, 0x44, 0x97, 0xa1, - 0xd0, 0x09, 0x89, 0x0c, 0x24, 0xa5, 0xce, 0x6f, 0xa6, 0x19, 0x66, 0x10, 0xca, 0x9a, 0x6e, 0x29, - 0xa5, 0xac, 0xc6, 0x9a, 0x72, 0x4d, 0x2b, 0x87, 0xd9, 0x5f, 0xcd, 0xc3, 0x85, 0x4c, 0x4b, 0x75, - 0xda, 0xa5, 0x5d, 0xdf, 0x73, 0x23, 0x5f, 0xbd, 0x1a, 0xf2, 0x50, 0x26, 0xa4, 0xbd, 0xbd, 0x2a, - 0xca, 0xb1, 0xc2, 0x40, 0x57, 0x64, 0x1a, 0xf6, 0x64, 0xca, 0x9b, 0xc5, 0x8a, 0x91, 0x89, 0x7d, - 0xd0, 0x74, 0x62, 0x4f, 0x41, 0xa1, 0xed, 0xfb, 0xad, 0xe4, 0x61, 0x44, 0xbb, 0xeb, 0xfb, 0x2d, - 0xcc, 0x80, 0xe8, 0x13, 0x62, 0x1c, 0x12, 0xcf, 0x64, 0xd8, 0x69, 0xfa, 0xa1, 0x36, 0x18, 0xcf, - 0xc0, 0xc8, 0x0e, 0xd9, 0x0f, 0x5c, 0x6f, 0x2b, 0xf9, 0x7c, 0x7a, 0x93, 0x17, 0x63, 0x09, 0x37, - 0x33, 0x3e, 0x8c, 0x9c, 0x74, 0x1e, 0xb0, 0x62, 0xdf, 0xab, 0xed, 0x07, 0xf2, 0x30, 0x89, 0x17, - 0x2b, 0xdf, 0x9a, 0x88, 0x3b, 0xdd, 0x13, 0x71, 0xd2, 0x79, 0xc0, 0xfa, 0xcf, 0xc6, 0x2f, 0x5a, - 0x30, 0xc9, 0xa2, 0x22, 0x8b, 0x00, 0x1a, 0xae, 0xef, 0x9d, 0x02, 0xeb, 0xf6, 0x14, 0x0c, 0x05, - 0xb4, 0xd1, 0x64, 0x72, 0x1f, 0xd6, 0x13, 0xcc, 0x61, 0xe8, 0x09, 0x28, 0xb0, 0x2e, 0xd0, 0xc9, - 0x1b, 0xe3, 0x79, 0x11, 0x2a, 0x4e, 0xe4, 0x60, 0x56, 0xca, 0xbc, 0xc2, 0x31, 0x69, 0xb7, 0x5c, - 0xde, 0xe9, 0xf8, 0x49, 0xe2, 0xa3, 0xe1, 0x15, 0x9e, 0xda, 0xb5, 0x0f, 0xe6, 0x15, 0x9e, 0x4e, - 0xb2, 0xb7, 0x58, 0xf4, 0xdf, 0x73, 0x70, 0x29, 0xb5, 0xde, 0xc0, 0x5e, 0xe1, 0xbd, 0x6b, 0x9f, - 0x8c, 0x15, 0x4c, 0xba, 0x71, 0x4a, 0xfe, 0x14, 0x8d, 0x53, 0x0a, 0x83, 0x72, 0x8e, 0x43, 0x03, - 0x38, 0x6b, 0xa7, 0x0e, 0xd9, 0x47, 0xc4, 0x59, 0x3b, 0xb5, 0x6f, 0x19, 0x62, 0xdd, 0x9f, 0xe6, - 0x32, 0xbe, 0x85, 0x09, 0x78, 0x57, 0xe9, 0x39, 0xc3, 0x80, 0xa1, 0xe0, 0x84, 0xc7, 0xf8, 0x19, - 0xc3, 0xcb, 0xb0, 0x82, 0x22, 0x57, 0x73, 0x7b, 0xce, 0x65, 0xa7, 0x7e, 0xcc, 0x6c, 0x6a, 0xce, - 0x7c, 0x41, 0x52, 0x43, 0x90, 0xe2, 0x02, 0xbd, 0xaa, 0x09, 0xe5, 0xf9, 0xc1, 0x85, 0xf2, 0xb1, - 0x74, 0x81, 0x1c, 0x2d, 0xc0, 0xe4, 0xae, 0xeb, 0xb1, 0x54, 0xfe, 0x26, 0x2b, 0xaa, 0xa2, 0x80, - 0xac, 0x9a, 0x60, 0x9c, 0xc4, 0x9f, 0x7d, 0x0d, 0xc6, 0x1f, 0x5e, 0x1d, 0xf9, 0x8d, 0x3c, 0x3c, - 0xde, 0x63, 0xdb, 0xf3, 0xb3, 0xde, 0x98, 0x03, 0xed, 0xac, 0xef, 0x9a, 0x87, 0x1a, 0x9c, 0xdd, - 0xec, 0xb4, 0x5a, 0xfb, 0xcc, 0xfe, 0x93, 0x34, 0x25, 0x86, 0xe0, 0x15, 0x9f, 0x90, 0x99, 0x28, - 0x56, 0x52, 0x70, 0x70, 0x6a, 0x4d, 0xf4, 0x26, 0x20, 0x5f, 0xe4, 0x9d, 0xbd, 0x4e, 0x3c, 0xa1, - 0x97, 0x67, 0x03, 0x9f, 0x8f, 0x37, 0xe3, 0xed, 0x2e, 0x0c, 0x9c, 0x52, 0x8b, 0x32, 0xfd, 0xf4, - 0x56, 0xda, 0x57, 0xdd, 0x4a, 0x30, 0xfd, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x87, 0x69, 0x67, - 0xcf, 0x71, 0x79, 0x74, 0x3c, 0x49, 0x80, 0x73, 0xfd, 0x4a, 0x09, 0xb6, 0x90, 0x44, 0xc0, 0xdd, - 0x75, 0x12, 0x8e, 0xd1, 0xc3, 0xd9, 0x8e, 0xd1, 0xbd, 0xcf, 0xc5, 0x7e, 0x3a, 0x5d, 0xfb, 0x3f, - 0x58, 0xf4, 0xfa, 0x4a, 0xc9, 0x1d, 0x4f, 0xc7, 0x41, 0xe9, 0x26, 0x35, 0x1f, 0xe5, 0x73, 0x9a, - 0x85, 0x47, 0x0c, 0xc4, 0x26, 0x2e, 0x5f, 0x10, 0x61, 0xec, 0x24, 0x63, 0xb0, 0xee, 0x22, 0xc6, - 0x81, 0xc2, 0x40, 0x9f, 0x87, 0x91, 0xa6, 0xbb, 0xe7, 0x86, 0x7e, 0x20, 0x36, 0xcb, 0x31, 0x5d, - 0x0d, 0xe2, 0x73, 0xb0, 0xc2, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0x81, 0x1c, 0x8c, 0xcb, 0x16, 0xdf, - 0xea, 0xf8, 0x91, 0x73, 0x0a, 0xd7, 0xf2, 0x75, 0xe3, 0x5a, 0xfe, 0x44, 0xaf, 0x40, 0x0f, 0xac, - 0x4b, 0x99, 0xd7, 0xf1, 0xed, 
0xc4, 0x75, 0xfc, 0x74, 0x7f, 0x52, 0xbd, 0xaf, 0xe1, 0x7f, 0x6a, - 0xc1, 0xb4, 0x81, 0x7f, 0x0a, 0xb7, 0xc1, 0x8a, 0x79, 0x1b, 0x3c, 0xd9, 0xf7, 0x1b, 0x32, 0x6e, - 0x81, 0xef, 0xcb, 0x27, 0xfa, 0xce, 0x4e, 0xff, 0xf7, 0xa0, 0xb0, 0xed, 0x04, 0xcd, 0x5e, 0x01, - 0x65, 0xbb, 0x2a, 0xcd, 0xdd, 0x70, 0x82, 0x26, 0x3f, 0xc3, 0x9f, 0x53, 0xd9, 0x2a, 0x9d, 0xa0, - 0xd9, 0xd7, 0x27, 0x8c, 0x35, 0x85, 0x5e, 0x85, 0xe1, 0xb0, 0xe1, 0xb7, 0x95, 0xc5, 0xe6, 0x65, - 0x9e, 0xc9, 0x92, 0x96, 0x1c, 0x1d, 0x94, 0x91, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f, 0xbd, 0x0d, - 0xe3, 0xec, 0x97, 0xb2, 0x5c, 0xc8, 0x67, 0xa7, 0x31, 0xa8, 0xeb, 0x88, 0xdc, 0x00, 0xc6, 0x28, - 0xc2, 0x26, 0xa9, 0xd9, 0x2d, 0x28, 0xa9, 0xcf, 0x7a, 0xa4, 0xbe, 0x3c, 0xff, 0x36, 0x0f, 0x67, - 0x52, 0xd6, 0x1c, 0x0a, 0x8d, 0x99, 0x78, 0x61, 0xc0, 0xa5, 0xfa, 0x01, 0xe7, 0x22, 0x64, 0xd2, - 0x50, 0x53, 0xac, 0xad, 0x81, 0x1b, 0xbd, 0x13, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x7f, 0xa3, 0xb4, - 0xb1, 0x53, 0x1b, 0x6a, 0xda, 0x90, 0xea, 0xe9, 0x23, 0x9d, 0xd3, 0x3f, 0xca, 0xc3, 0xd9, 0xb4, - 0xd8, 0x33, 0xe8, 0xbb, 0x13, 0x29, 0x6d, 0x5e, 0x1a, 0x34, 0x6a, 0x0d, 0xcf, 0x73, 0x23, 0x12, - 0x34, 0xcf, 0x99, 0x49, 0x6e, 0xfa, 0x0e, 0xb3, 0x68, 0x93, 0x39, 0x80, 0x06, 0x3c, 0x15, 0x91, - 0x3c, 0x3e, 0x3e, 0x3d, 0x70, 0x07, 0x44, 0x0e, 0xa3, 0x30, 0xe1, 0x00, 0x2a, 0x8b, 0xfb, 0x3b, - 0x80, 0xca, 0x96, 0x67, 0x5d, 0x18, 0xd5, 0xbe, 0xe6, 0x91, 0xce, 0xf8, 0x0e, 0xbd, 0xad, 0xb4, - 0x7e, 0x3f, 0xd2, 0x59, 0xff, 0x51, 0x0b, 0x12, 0xe6, 0x91, 0x4a, 0xdd, 0x65, 0x65, 0xaa, 0xbb, - 0x2e, 0x43, 0x21, 0xf0, 0x5b, 0x24, 0x99, 0x41, 0x06, 0xfb, 0x2d, 0x82, 0x19, 0x84, 0x62, 0x44, - 0xb1, 0xb2, 0x63, 0x4c, 0x17, 0xe4, 0x84, 0x88, 0xf6, 0x14, 0x0c, 0xb5, 0xc8, 0x1e, 0x69, 0x25, - 0xc3, 0xb3, 0xdf, 0xa2, 0x85, 0x98, 0xc3, 0xec, 0x5f, 0x2c, 0xc0, 0xc5, 0x9e, 0x2e, 0xd4, 0x54, - 0x1c, 0xda, 0x72, 0x22, 0x72, 0xdf, 0xd9, 0x4f, 0xc6, 0x51, 0xbe, 0xce, 0x8b, 0xb1, 0x84, 0x33, - 0x8b, 0x71, 0x1e, 0x37, 0x31, 0xa1, 0x1c, 0x14, 0xe1, 0x12, 0x05, 0xf4, 0x11, 0x24, 0xa7, 0xbf, - 0x06, 0x10, 0x86, 0xad, 0x65, 0x8f, 0x72, 0x77, 0x4d, 0x61, 0x8a, 0x1e, 0xc7, 0xd7, 0xac, 0xdf, - 0x12, 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x4c, 0xb5, 0x03, 0x3f, 0xe2, 0xba, 0xd6, 0x0a, 0x37, 0x14, - 0x1a, 0x32, 0xbd, 0x57, 0x6b, 0x09, 0x38, 0xee, 0xaa, 0x81, 0x5e, 0x86, 0x51, 0xe1, 0xd1, 0x5a, - 0xf3, 0xfd, 0x96, 0x50, 0x03, 0x29, 0xb3, 0x93, 0x7a, 0x0c, 0xc2, 0x3a, 0x9e, 0x56, 0x8d, 0x29, - 0x70, 0x47, 0x52, 0xab, 0x71, 0x25, 0xae, 0x86, 0x97, 0x88, 0x43, 0x55, 0x1c, 0x28, 0x0e, 0x55, - 0xac, 0x18, 0x2b, 0x0d, 0xfc, 0x66, 0x05, 0x7d, 0x55, 0x49, 0x3f, 0x57, 0x80, 0x33, 0x62, 0xe1, - 0x3c, 0xea, 0xe5, 0xf2, 0x88, 0x52, 0xe8, 0x7f, 0x6b, 0xcd, 0x9c, 0xf6, 0x9a, 0xf9, 0x41, 0x0b, - 0x4c, 0xf6, 0x0a, 0xfd, 0xb9, 0xcc, 0x40, 0xf4, 0x2f, 0x67, 0xb2, 0x6b, 0x4d, 0x79, 0x81, 0x7c, - 0xc0, 0x90, 0xf4, 0xf6, 0xbf, 0xb7, 0xe0, 0xc9, 0xbe, 0x14, 0xd1, 0x32, 0x94, 0x18, 0x0f, 0xa8, - 0x49, 0x67, 0x4f, 0x2b, 0x43, 0x42, 0x09, 0xc8, 0x60, 0x49, 0xe3, 0x9a, 0x68, 0xb9, 0x2b, 0xe2, - 0xff, 0x33, 0x29, 0x11, 0xff, 0xcf, 0x19, 0xc3, 0xf3, 0x90, 0x21, 0xff, 0x7f, 0x25, 0x0f, 0xc3, - 0x7c, 0xc5, 0x9f, 0x82, 0x18, 0xb6, 0x22, 0xf4, 0xb6, 0x3d, 0x22, 0x51, 0xf1, 0xbe, 0xcc, 0x55, - 0x9c, 0xc8, 0xe1, 0x6c, 0x82, 0xba, 0xad, 0x62, 0x0d, 0x2f, 0x9a, 0x33, 0xee, 0xb3, 0xd9, 0x84, - 0x62, 0x12, 0x38, 0x0d, 0xed, 0x76, 0xfb, 0x22, 0x40, 0xc8, 0xb2, 0xe5, 0x53, 0x1a, 0x22, 0xa6, - 0xd9, 0x27, 0x7b, 0xb4, 0x5e, 0x57, 0xc8, 0xbc, 0x0f, 0xf1, 0x4e, 0x57, 0x00, 0xac, 0x51, 0x9c, - 0x7d, 0x05, 0x4a, 0x0a, 0xb9, 0x9f, 0x16, 0x67, 0x4c, 
0x67, 0x2e, 0x3e, 0x0b, 0x93, 0x89, 0xb6, - 0x8e, 0xa5, 0x04, 0xfa, 0x25, 0x0b, 0x26, 0x79, 0x97, 0x97, 0xbd, 0x3d, 0x71, 0xa6, 0xbe, 0x0f, - 0x67, 0x5b, 0x29, 0x67, 0x9b, 0x98, 0xd1, 0xc1, 0xcf, 0x42, 0xa5, 0xf4, 0x49, 0x83, 0xe2, 0xd4, - 0x36, 0xd0, 0x55, 0xba, 0x6e, 0xe9, 0xd9, 0xe5, 0xb4, 0x84, 0xf7, 0xd1, 0x18, 0x5f, 0xb3, 0xbc, - 0x0c, 0x2b, 0xa8, 0xfd, 0x3b, 0x16, 0x4c, 0xf3, 0x9e, 0xdf, 0x24, 0xfb, 0x6a, 0x87, 0x7f, 0x98, - 0x7d, 0x17, 0x49, 0x38, 0x72, 0x19, 0x49, 0x38, 0xf4, 0x4f, 0xcb, 0xf7, 0xfc, 0xb4, 0x9f, 0xb5, - 0x40, 0xac, 0xc0, 0x53, 0x10, 0xe5, 0xbf, 0xdd, 0x14, 0xe5, 0x67, 0xb3, 0x17, 0x75, 0x86, 0x0c, - 0xff, 0x27, 0x16, 0x4c, 0x71, 0x84, 0xf8, 0x2d, 0xf9, 0x43, 0x9d, 0x87, 0x41, 0xb2, 0xe9, 0xa9, - 0x14, 0xdb, 0xe9, 0x1f, 0x65, 0x4c, 0x56, 0xa1, 0xe7, 0x64, 0x35, 0xe5, 0x06, 0x3a, 0x46, 0x26, - 0xc9, 0x63, 0x07, 0xb3, 0xb6, 0xff, 0xd0, 0x02, 0xc4, 0x9b, 0x31, 0xd8, 0x1f, 0xca, 0x54, 0xb0, - 0x52, 0xed, 0xba, 0x88, 0x8f, 0x1a, 0x05, 0xc1, 0x1a, 0xd6, 0x89, 0x0c, 0x4f, 0xc2, 0x20, 0x20, - 0xdf, 0xdf, 0x20, 0xe0, 0x18, 0x23, 0xfa, 0x7f, 0x0a, 0x90, 0x74, 0x07, 0x40, 0x77, 0x61, 0xac, - 0xe1, 0xb4, 0x9d, 0x0d, 0xb7, 0xe5, 0x46, 0x2e, 0x09, 0x7b, 0x59, 0x12, 0x2d, 0x69, 0x78, 0xe2, - 0xa9, 0x57, 0x2b, 0xc1, 0x06, 0x1d, 0x34, 0x07, 0xd0, 0x0e, 0xdc, 0x3d, 0xb7, 0x45, 0xb6, 0x98, - 0xc6, 0x81, 0xf9, 0x3b, 0x72, 0xf3, 0x18, 0x59, 0x8a, 0x35, 0x8c, 0x14, 0xd7, 0xb5, 0xfc, 0xa3, - 0x73, 0x5d, 0x2b, 0x1c, 0xd3, 0x75, 0x6d, 0x68, 0x20, 0xd7, 0x35, 0x0c, 0xe7, 0x25, 0x8b, 0x44, - 0xff, 0xaf, 0xb8, 0x2d, 0x22, 0xf8, 0x62, 0xee, 0x05, 0x39, 0x7b, 0x78, 0x50, 0x3e, 0x8f, 0x53, - 0x31, 0x70, 0x46, 0x4d, 0xf4, 0x39, 0x98, 0x71, 0x5a, 0x2d, 0xff, 0xbe, 0x1a, 0xb5, 0xe5, 0xb0, - 0xe1, 0xb4, 0xb8, 0xc6, 0x7e, 0x84, 0x51, 0x7d, 0xe2, 0xf0, 0xa0, 0x3c, 0xb3, 0x90, 0x81, 0x83, - 0x33, 0x6b, 0x27, 0x3c, 0xdf, 0x8a, 0x7d, 0x3d, 0xdf, 0x5e, 0x87, 0x52, 0x3b, 0xf0, 0x1b, 0xab, - 0x9a, 0x37, 0xce, 0x25, 0x96, 0xa7, 0x5e, 0x16, 0x1e, 0x1d, 0x94, 0xc7, 0xd5, 0x1f, 0x76, 0xc3, - 0xc7, 0x15, 0xec, 0x1d, 0x38, 0x53, 0x27, 0x81, 0xcb, 0x32, 0x60, 0x36, 0xe3, 0x0d, 0xbd, 0x0e, - 0xa5, 0x20, 0x71, 0x84, 0x0d, 0x14, 0x58, 0x49, 0x8b, 0xf2, 0x2b, 0x8f, 0xac, 0x98, 0x90, 0xfd, - 0xc7, 0x16, 0x8c, 0x08, 0x87, 0x86, 0x53, 0xe0, 0x9c, 0x16, 0x0c, 0x05, 0x76, 0x39, 0xfd, 0x98, - 0x67, 0x9d, 0xc9, 0x54, 0x5d, 0x57, 0x13, 0xaa, 0xeb, 0x27, 0x7b, 0x11, 0xe9, 0xad, 0xb4, 0xfe, - 0x9b, 0x79, 0x98, 0x30, 0x9d, 0x39, 0x4e, 0x61, 0x08, 0xd6, 0x60, 0x24, 0x14, 0x9e, 0x43, 0xb9, - 0x6c, 0xcb, 0xe9, 0xe4, 0x24, 0xc6, 0x66, 0x51, 0xc2, 0x57, 0x48, 0x12, 0x49, 0x75, 0x49, 0xca, - 0x3f, 0x42, 0x97, 0xa4, 0x7e, 0xfe, 0x34, 0x85, 0x93, 0xf0, 0xa7, 0xb1, 0xbf, 0xc6, 0xae, 0x1a, - 0xbd, 0xfc, 0x14, 0xb8, 0x90, 0xeb, 0xe6, 0xa5, 0x64, 0xf7, 0x58, 0x59, 0xa2, 0x53, 0x19, 0xdc, - 0xc8, 0x2f, 0x58, 0x70, 0x31, 0xe5, 0xab, 0x34, 0xd6, 0xe4, 0x39, 0x28, 0x3a, 0x9d, 0xa6, 0xab, - 0xf6, 0xb2, 0xf6, 0x8c, 0xb5, 0x20, 0xca, 0xb1, 0xc2, 0x40, 0x4b, 0x30, 0x4d, 0x1e, 0xb4, 0x5d, - 0xfe, 0x8e, 0xa8, 0xdb, 0x2e, 0xe6, 0x79, 0x88, 0xd9, 0xe5, 0x24, 0x10, 0x77, 0xe3, 0x2b, 0x77, - 0xec, 0x7c, 0xa6, 0x3b, 0xf6, 0x3f, 0xb0, 0x60, 0x54, 0x74, 0xfb, 0x14, 0x46, 0xfb, 0x3b, 0xcc, - 0xd1, 0x7e, 0xbc, 0xc7, 0x68, 0x67, 0x0c, 0xf3, 0xdf, 0xce, 0xa9, 0xfe, 0xd6, 0xfc, 0x20, 0x1a, - 0x80, 0xe5, 0x79, 0x15, 0x8a, 0xed, 0xc0, 0x8f, 0xfc, 0x86, 0xdf, 0x12, 0x1c, 0xcf, 0x13, 0x71, - 0xb4, 0x00, 0x5e, 0x7e, 0xa4, 0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x9e, 0x1f, 0x44, 0x82, 0xcb, 0x88, - 0x47, 0xcf, 0x0f, 0x22, 0xcc, 0x20, 0xa8, 0x09, 0x10, 0x39, 0xc1, 0x16, 0x89, 
0x68, 0x99, 0x08, - 0x3c, 0x92, 0x7d, 0x78, 0x74, 0x22, 0xb7, 0x35, 0xe7, 0x7a, 0x51, 0x18, 0x05, 0x73, 0x55, 0x2f, - 0xba, 0x1d, 0x70, 0x01, 0x4a, 0x73, 0xff, 0x57, 0xb4, 0xb0, 0x46, 0x57, 0xfa, 0x68, 0xb2, 0x36, - 0x86, 0xcc, 0x07, 0xf1, 0x35, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x0a, 0xbb, 0x4a, 0xd8, 0x00, 0x1d, - 0xcf, 0x33, 0xff, 0xeb, 0x45, 0x35, 0xb4, 0xec, 0x35, 0xac, 0xa2, 0xfb, 0xff, 0xf7, 0x3e, 0xb9, - 0x69, 0xc3, 0xba, 0x1f, 0x4d, 0x1c, 0x24, 0x00, 0x7d, 0x67, 0x97, 0x9d, 0xc4, 0xf3, 0x7d, 0xae, - 0x80, 0x63, 0x58, 0x46, 0xb0, 0xb0, 0xd7, 0x2c, 0x3c, 0x70, 0xb5, 0x26, 0x16, 0xb9, 0x16, 0xf6, - 0x5a, 0x00, 0x70, 0x8c, 0x83, 0xe6, 0x85, 0xf8, 0x5d, 0x30, 0x92, 0xdf, 0x49, 0xf1, 0x5b, 0x7e, - 0xbe, 0x26, 0x7f, 0xbf, 0x00, 0xa3, 0x2a, 0x09, 0x5e, 0x8d, 0xe7, 0x12, 0x13, 0x61, 0x58, 0x96, - 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x75, 0x98, 0x0c, 0xb9, 0xee, 0x45, 0x45, 0xdb, 0xe3, 0x3a, 0xac, - 0x4f, 0x4a, 0xfb, 0x8a, 0xba, 0x09, 0x3e, 0x62, 0x45, 0xfc, 0xe8, 0x90, 0x8e, 0x96, 0x49, 0x12, - 0xe8, 0x0d, 0x98, 0x68, 0xe9, 0xe9, 0xe6, 0x6b, 0x42, 0xc5, 0xa5, 0xcc, 0x8f, 0x8d, 0x64, 0xf4, - 0x35, 0x9c, 0xc0, 0xa6, 0x9c, 0x92, 0x5e, 0x22, 0x22, 0x44, 0x3a, 0xde, 0x16, 0x09, 0x45, 0x0a, - 0x2f, 0xc6, 0x29, 0xdd, 0xca, 0xc0, 0xc1, 0x99, 0xb5, 0xd1, 0xab, 0x30, 0x26, 0x3f, 0x5f, 0x73, - 0x23, 0x8e, 0x8d, 0xdc, 0x35, 0x18, 0x36, 0x30, 0xd1, 0x7d, 0x38, 0x27, 0xff, 0xaf, 0x07, 0xce, - 0xe6, 0xa6, 0xdb, 0x10, 0x5e, 0xdc, 0xdc, 0xd3, 0x67, 0x41, 0xba, 0x0e, 0x2d, 0xa7, 0x21, 0x1d, - 0x1d, 0x94, 0x2f, 0x8b, 0x51, 0x4b, 0x85, 0xb3, 0x49, 0x4c, 0xa7, 0x8f, 0x56, 0xe1, 0xcc, 0x36, - 0x71, 0x5a, 0xd1, 0xf6, 0xd2, 0x36, 0x69, 0xec, 0xc8, 0x4d, 0xc4, 0x9c, 0x93, 0x35, 0xd3, 0xf0, - 0x1b, 0xdd, 0x28, 0x38, 0xad, 0x1e, 0x7a, 0x07, 0x66, 0xda, 0x9d, 0x8d, 0x96, 0x1b, 0x6e, 0xaf, - 0xf9, 0x11, 0x33, 0xe9, 0x50, 0x39, 0xe4, 0x84, 0x17, 0xb3, 0x72, 0xcc, 0xae, 0x65, 0xe0, 0xe1, - 0x4c, 0x0a, 0xe8, 0x7d, 0x38, 0x97, 0x58, 0x0c, 0xc2, 0xa7, 0x72, 0x22, 0x3b, 0xde, 0x6e, 0x3d, - 0xad, 0x82, 0xf0, 0x91, 0x4c, 0x03, 0xe1, 0xf4, 0x26, 0x3e, 0x98, 0xa1, 0xcf, 0x7b, 0xb4, 0xb2, - 0xc6, 0x94, 0xa1, 0x2f, 0xc1, 0x98, 0xbe, 0x8a, 0xc4, 0x05, 0x73, 0x25, 0x9d, 0x67, 0xd1, 0x56, - 0x1b, 0x67, 0xe9, 0xd4, 0x8a, 0xd2, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0x48, 0xff, 0x3e, 0x74, 0x0b, - 0x8a, 0x8d, 0x96, 0x4b, 0xbc, 0xa8, 0x5a, 0xeb, 0x15, 0xf4, 0x63, 0x49, 0xe0, 0x88, 0x01, 0x13, - 0x01, 0x4a, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0xcf, 0x41, 0xb9, 0x4f, 0xb4, 0xdb, 0x84, 0x3e, - 0xda, 0x1a, 0x48, 0x1f, 0xbd, 0x20, 0x33, 0xe2, 0xad, 0x25, 0x84, 0xf4, 0x44, 0xb6, 0xbb, 0x58, - 0x54, 0x4f, 0xe2, 0x0f, 0x6c, 0x1f, 0xac, 0xab, 0xb4, 0x0b, 0x7d, 0x2d, 0xd7, 0x8d, 0xa7, 0xac, - 0xa1, 0xc1, 0x05, 0x91, 0xcc, 0x67, 0x09, 0xfb, 0x6b, 0x39, 0x38, 0xa7, 0x86, 0xf0, 0x9b, 0x77, - 0xe0, 0xee, 0x74, 0x0f, 0xdc, 0x09, 0x3c, 0xea, 0xd8, 0xb7, 0x61, 0x98, 0x07, 0x4d, 0x19, 0x80, - 0x01, 0x7a, 0xca, 0x8c, 0xb0, 0xa5, 0xae, 0x69, 0x23, 0xca, 0xd6, 0x5f, 0xb2, 0x60, 0x72, 0x7d, - 0xa9, 0x56, 0xf7, 0x1b, 0x3b, 0x24, 0x5a, 0xe0, 0x0c, 0x2b, 0x16, 0xfc, 0x8f, 0xf5, 0x90, 0x7c, - 0x4d, 0x1a, 0xc7, 0x74, 0x19, 0x0a, 0xdb, 0x7e, 0x18, 0x25, 0x5f, 0x7c, 0x6f, 0xf8, 0x61, 0x84, - 0x19, 0xc4, 0xfe, 0x5d, 0x0b, 0x86, 0x58, 0x1e, 0xd7, 0x7e, 0xc9, 0x85, 0x07, 0xf9, 0x2e, 0xf4, - 0x32, 0x0c, 0x93, 0xcd, 0x4d, 0xd2, 0x88, 0xc4, 0xac, 0x4a, 0x77, 0xd4, 0xe1, 0x65, 0x56, 0x4a, - 0x2f, 0x7d, 0xd6, 0x18, 0xff, 0x8b, 0x05, 0x32, 0xba, 0x07, 0xa5, 0xc8, 0xdd, 0x25, 0x0b, 0xcd, - 0xa6, 0x78, 0x33, 0x7b, 0x08, 0xef, 0xdf, 0x75, 0x49, 0x00, 0xc7, 0xb4, 0xec, 0xaf, 0xe6, 0x00, - 0x62, 
0xd7, 0xff, 0x7e, 0x9f, 0xb8, 0xd8, 0xf5, 0x9a, 0x72, 0x25, 0xe5, 0x35, 0x05, 0xc5, 0x04, - 0x53, 0x9e, 0x52, 0xd4, 0x30, 0xe5, 0x07, 0x1a, 0xa6, 0xc2, 0x71, 0x86, 0x69, 0x09, 0xa6, 0xe3, - 0xd0, 0x05, 0x66, 0x1c, 0x17, 0x26, 0xa4, 0xac, 0x27, 0x81, 0xb8, 0x1b, 0xdf, 0x26, 0x70, 0x59, - 0x46, 0xd4, 0x94, 0x77, 0x0d, 0x33, 0xc9, 0x3c, 0x46, 0x9e, 0xe9, 0xf8, 0xb9, 0x28, 0x97, 0xf9, - 0x5c, 0xf4, 0x13, 0x16, 0x9c, 0x4d, 0xb6, 0xc3, 0x7c, 0xdf, 0xbe, 0x62, 0xc1, 0x39, 0xf6, 0x68, - 0xc6, 0x5a, 0xed, 0x7e, 0xa2, 0x7b, 0x29, 0x3d, 0xa4, 0x43, 0xef, 0x1e, 0xc7, 0x7e, 0xcf, 0xab, - 0x69, 0xa4, 0x71, 0x7a, 0x8b, 0xf6, 0x57, 0x2c, 0xb8, 0x90, 0x99, 0x3e, 0x08, 0x5d, 0x85, 0xa2, - 0xd3, 0x76, 0xb9, 0x46, 0x4a, 0xec, 0x77, 0x26, 0x3d, 0xd6, 0xaa, 0x5c, 0x1f, 0xa5, 0xa0, 0x2a, - 0xad, 0x61, 0x2e, 0x33, 0xad, 0x61, 0xdf, 0x2c, 0x85, 0xf6, 0xf7, 0x5b, 0x20, 0xdc, 0x9d, 0x06, - 0x38, 0x64, 0xde, 0x96, 0x59, 0x61, 0x8d, 0x60, 0xe6, 0x97, 0xb3, 0xfd, 0xbf, 0x44, 0x08, 0x73, - 0x75, 0xa9, 0x1b, 0x81, 0xcb, 0x0d, 0x5a, 0x76, 0x13, 0x04, 0xb4, 0x42, 0x98, 0xce, 0xaa, 0x7f, - 0x6f, 0xae, 0x01, 0x34, 0x19, 0xae, 0x96, 0x1b, 0x52, 0x5d, 0x21, 0x15, 0x05, 0xc1, 0x1a, 0x96, - 0xfd, 0x43, 0x39, 0x18, 0x95, 0xc1, 0xb3, 0x3b, 0xde, 0x20, 0x92, 0xe5, 0xb1, 0x72, 0xe8, 0xb0, - 0x64, 0xaa, 0x94, 0x70, 0x2d, 0x16, 0xc8, 0xe3, 0x64, 0xaa, 0x12, 0x80, 0x63, 0x1c, 0xf4, 0x0c, - 0x8c, 0x84, 0x9d, 0x0d, 0x86, 0x9e, 0x70, 0xe2, 0xa9, 0xf3, 0x62, 0x2c, 0xe1, 0xe8, 0x73, 0x30, - 0xc5, 0xeb, 0x05, 0x7e, 0xdb, 0xd9, 0xe2, 0xea, 0xcf, 0x21, 0xe5, 0x55, 0x3b, 0xb5, 0x9a, 0x80, - 0x1d, 0x1d, 0x94, 0xcf, 0x26, 0xcb, 0x98, 0xe2, 0xbc, 0x8b, 0x8a, 0xfd, 0x25, 0x40, 0xdd, 0xf1, - 0xc0, 0xd1, 0x9b, 0xdc, 0x94, 0xca, 0x0d, 0x48, 0xb3, 0x97, 0x46, 0x5c, 0x77, 0x02, 0x95, 0x86, - 0xf4, 0xbc, 0x16, 0x56, 0xf5, 0xed, 0xbf, 0x9a, 0x87, 0xa9, 0xa4, 0x4b, 0x20, 0xba, 0x01, 0xc3, - 0xfc, 0xb2, 0x13, 0xe4, 0x7b, 0x3c, 0xb8, 0x6a, 0x8e, 0x84, 0x6c, 0xdb, 0x8b, 0xfb, 0x52, 0xd4, - 0x47, 0xef, 0xc0, 0x68, 0xd3, 0xbf, 0xef, 0xdd, 0x77, 0x82, 0xe6, 0x42, 0xad, 0x2a, 0xd6, 0x65, - 0x2a, 0xcf, 0x5c, 0x89, 0xd1, 0x74, 0xe7, 0x44, 0xf6, 0xb8, 0x10, 0x83, 0xb0, 0x4e, 0x0e, 0xad, - 0xb3, 0x18, 0x87, 0x9b, 0xee, 0xd6, 0xaa, 0xd3, 0xee, 0x65, 0x57, 0xbb, 0x24, 0x91, 0x34, 0xca, - 0xe3, 0x22, 0x10, 0x22, 0x07, 0xe0, 0x98, 0x10, 0xfa, 0x6e, 0x38, 0x13, 0x66, 0xa8, 0xd9, 0xb2, - 0xd2, 0x43, 0xf4, 0xd2, 0x3c, 0x2d, 0x3e, 0x46, 0xa5, 0x99, 0x34, 0x85, 0x5c, 0x5a, 0x33, 0xf6, - 0x97, 0xcf, 0x80, 0xb1, 0x1b, 0x8d, 0x1c, 0x41, 0xd6, 0x09, 0xe5, 0x08, 0xc2, 0x50, 0x24, 0xbb, - 0xed, 0x68, 0xbf, 0xe2, 0x06, 0xbd, 0x72, 0xd8, 0x2d, 0x0b, 0x9c, 0x6e, 0x9a, 0x12, 0x82, 0x15, - 0x9d, 0xf4, 0x44, 0x4e, 0xf9, 0x0f, 0x31, 0x91, 0x53, 0xe1, 0x14, 0x13, 0x39, 0xad, 0xc1, 0xc8, - 0x96, 0x1b, 0x61, 0xd2, 0xf6, 0x05, 0x9b, 0x99, 0xba, 0x0e, 0xaf, 0x73, 0x94, 0xee, 0xe4, 0x21, - 0x02, 0x80, 0x25, 0x11, 0xf4, 0xa6, 0xda, 0x81, 0xc3, 0xd9, 0x52, 0x5a, 0xf7, 0xcb, 0x60, 0xea, - 0x1e, 0x14, 0x89, 0x9b, 0x46, 0x1e, 0x36, 0x71, 0xd3, 0x8a, 0x4c, 0xb7, 0x54, 0xcc, 0x36, 0x82, - 0x67, 0xd9, 0x94, 0xfa, 0x24, 0x59, 0x32, 0x12, 0x53, 0x95, 0x4e, 0x2e, 0x31, 0xd5, 0xf7, 0x5b, - 0x70, 0xae, 0x9d, 0x96, 0xa3, 0x4d, 0x24, 0x49, 0x7a, 0x79, 0xe0, 0x24, 0x74, 0x46, 0x83, 0x4c, - 0x5c, 0x4f, 0x45, 0xc3, 0xe9, 0xcd, 0xd1, 0x81, 0x0e, 0x36, 0x9a, 0x22, 0xb3, 0xd2, 0x53, 0x19, - 0x19, 0xae, 0x7a, 0xe4, 0xb5, 0x5a, 0x4f, 0xc9, 0xa6, 0xf4, 0xf1, 0xac, 0x6c, 0x4a, 0x03, 0xe7, - 0x50, 0x7a, 0x53, 0xe5, 0xb6, 0x1a, 0xcf, 0x5e, 0x4a, 0x3c, 0x73, 0x55, 0xdf, 0x8c, 0x56, 0x6f, - 0xaa, 0x8c, 0x56, 0x3d, 0x62, 
-	[... remaining bytes of the old gzipped FileDescriptorProto array omitted (machine-generated) ...]
+	// 12835 bytes of a gzipped FileDescriptorProto
+	[... new gzipped FileDescriptorProto byte array omitted (machine-generated, not human-reviewable; continues below) ...]
0xb9, 0x6e, 0xdd, 0xff, 0xc4, 0x82, 0x31, 0xe5, 0x6d, 0xf2, 0xd0, 0x47, 0xfb, 0x3b, 0xcc, + 0xd1, 0x7e, 0xac, 0xc7, 0x68, 0xe7, 0x0c, 0xf3, 0xdf, 0x2f, 0xa8, 0xfe, 0xd6, 0x83, 0x30, 0x1e, + 0x80, 0xe5, 0x79, 0x05, 0x46, 0xdb, 0x61, 0x10, 0x07, 0xcd, 0xc0, 0x13, 0x1c, 0xcf, 0xe3, 0x49, + 0xd4, 0x01, 0x5e, 0x7e, 0xa4, 0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x5e, 0x10, 0xc6, 0x82, 0xcb, 0x48, + 0x46, 0x2f, 0x08, 0x63, 0xcc, 0x20, 0xa8, 0x05, 0x10, 0x3b, 0xe1, 0x16, 0x89, 0x69, 0x99, 0x08, + 0x60, 0x92, 0x7f, 0x78, 0x74, 0x62, 0xd7, 0x9b, 0x77, 0xfd, 0x38, 0x8a, 0xc3, 0xf9, 0x9a, 0x1f, + 0xdf, 0x0e, 0xb9, 0x00, 0xa5, 0x85, 0x11, 0x50, 0xb4, 0xb0, 0x46, 0x57, 0xfa, 0x7a, 0xb2, 0x36, + 0x86, 0xcc, 0x07, 0xf1, 0x35, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x32, 0xbb, 0x4a, 0xd8, 0x00, 0x1d, + 0xcf, 0xc3, 0xff, 0x1b, 0xa3, 0x6a, 0x68, 0xd9, 0x6b, 0x58, 0x55, 0x8f, 0x23, 0xd0, 0xfb, 0xe4, + 0xa6, 0x0d, 0xeb, 0x7e, 0x34, 0x49, 0xb0, 0x01, 0xf4, 0x9d, 0x5d, 0x76, 0x12, 0xcf, 0xf5, 0xb9, + 0x02, 0x8e, 0x61, 0x19, 0xc1, 0x02, 0x69, 0xb3, 0x30, 0xc3, 0xb5, 0xba, 0x58, 0xe4, 0x5a, 0x20, + 0x6d, 0x01, 0xc0, 0x09, 0x0e, 0xba, 0x2a, 0xc4, 0xef, 0x92, 0x91, 0x4e, 0x4f, 0x8a, 0xdf, 0xf2, + 0xf3, 0x35, 0xf9, 0xfb, 0x79, 0x18, 0x53, 0x69, 0xf5, 0xea, 0x3c, 0x3b, 0x99, 0x08, 0xe7, 0xb2, + 0x9c, 0x14, 0x63, 0x1d, 0x07, 0xad, 0xc3, 0x54, 0xc4, 0x75, 0x2f, 0x2a, 0x6a, 0x1f, 0xd7, 0x61, + 0x7d, 0x52, 0xda, 0x57, 0x34, 0x4c, 0xf0, 0x11, 0x2b, 0xe2, 0x47, 0x87, 0x74, 0xd8, 0x4c, 0x93, + 0x40, 0xaf, 0xc3, 0xa4, 0xa7, 0x27, 0xb0, 0xaf, 0x0b, 0x15, 0x97, 0x32, 0x3f, 0x36, 0xd2, 0xdb, + 0xd7, 0x71, 0x0a, 0x9b, 0x72, 0x4a, 0x7a, 0x89, 0x88, 0x34, 0xe9, 0xf8, 0x5b, 0x24, 0x12, 0x49, + 0xc1, 0x18, 0xa7, 0x74, 0x2b, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0xaf, 0xc0, 0xb8, 0xfc, 0x7c, 0xcd, + 0x1d, 0x39, 0x31, 0x72, 0xd7, 0x60, 0xd8, 0xc0, 0x44, 0xf7, 0xe0, 0x9c, 0xfc, 0xbf, 0x1e, 0x3a, + 0x9b, 0x9b, 0x6e, 0x53, 0x78, 0x83, 0x73, 0x4f, 0x9f, 0x05, 0xe9, 0x3a, 0xb4, 0x9c, 0x85, 0x74, + 0x74, 0x50, 0xb9, 0x24, 0x46, 0x2d, 0x13, 0xce, 0x26, 0x31, 0x9b, 0x3e, 0x5a, 0x85, 0x33, 0xdb, + 0xc4, 0xf1, 0xe2, 0xed, 0xa5, 0x6d, 0xd2, 0xdc, 0x91, 0x9b, 0x88, 0x39, 0x39, 0x6b, 0xa6, 0xe1, + 0x37, 0xba, 0x51, 0x70, 0x56, 0x3d, 0xf4, 0x0e, 0xcc, 0xb6, 0x3b, 0x1b, 0x9e, 0x1b, 0x6d, 0xaf, + 0x05, 0x31, 0x33, 0xe9, 0x50, 0x59, 0xe9, 0x84, 0x37, 0xb4, 0x72, 0xf0, 0xae, 0xe7, 0xe0, 0xe1, + 0x5c, 0x0a, 0xe8, 0x7d, 0x38, 0x97, 0x5a, 0x0c, 0xc2, 0x37, 0x73, 0x32, 0x3f, 0x6e, 0x6f, 0x23, + 0xab, 0x82, 0xf0, 0xb5, 0xcc, 0x02, 0xe1, 0xec, 0x26, 0x3e, 0x98, 0xa1, 0xcf, 0x7b, 0xb4, 0xb2, + 0xc6, 0x94, 0xa1, 0x2f, 0xc1, 0xb8, 0xbe, 0x8a, 0xc4, 0x05, 0x73, 0x39, 0x9b, 0x67, 0xd1, 0x56, + 0x1b, 0x67, 0xe9, 0xd4, 0x8a, 0xd2, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0xfe, 0x3e, 0x74, 0x0b, + 0x46, 0x9b, 0x9e, 0x4b, 0xfc, 0xb8, 0x56, 0xef, 0x15, 0x3c, 0x64, 0x49, 0xe0, 0x88, 0x01, 0x13, + 0x81, 0x4e, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0x2b, 0x40, 0xa5, 0x4f, 0xd4, 0xdc, 0x94, 0x3e, + 0xda, 0x1a, 0x48, 0x1f, 0xbd, 0x20, 0x73, 0xec, 0xad, 0xa5, 0x84, 0xf4, 0x54, 0xfe, 0xbc, 0x44, + 0x54, 0x4f, 0xe3, 0x0f, 0x6c, 0x1f, 0xac, 0xab, 0xb4, 0x4b, 0x7d, 0x2d, 0xd7, 0x8d, 0xa7, 0xac, + 0xa1, 0xc1, 0x05, 0x91, 0xdc, 0x67, 0x09, 0xfb, 0xeb, 0x05, 0x38, 0xa7, 0x86, 0xf0, 0x2f, 0xee, + 0xc0, 0xdd, 0xe9, 0x1e, 0xb8, 0x13, 0x78, 0xd4, 0xb1, 0x6f, 0xc3, 0x30, 0x0f, 0xbe, 0x32, 0x00, + 0x03, 0xf4, 0xa4, 0x19, 0xa9, 0x4b, 0x5d, 0xd3, 0x46, 0xb4, 0xae, 0xbf, 0x66, 0xc1, 0xd4, 0xfa, + 0x52, 0xbd, 0x11, 0x34, 0x77, 0x48, 0xbc, 0xc0, 0x19, 0x56, 0x2c, 0xf8, 0x1f, 0xeb, 0x01, 0xf9, + 0x9a, 0x2c, 0x8e, 0xe9, 0x12, 
0x94, 0xb6, 0x83, 0x28, 0x4e, 0xbf, 0xf8, 0xde, 0x08, 0xa2, 0x18, + 0x33, 0x88, 0xfd, 0x3b, 0x16, 0x0c, 0xb1, 0xcc, 0xb0, 0xfd, 0xd2, 0x15, 0x0f, 0xf2, 0x5d, 0xe8, + 0x25, 0x18, 0x26, 0x9b, 0x9b, 0xa4, 0x19, 0x8b, 0x59, 0x95, 0xee, 0xa8, 0xc3, 0xcb, 0xac, 0x94, + 0x5e, 0xfa, 0xac, 0x31, 0xfe, 0x17, 0x0b, 0x64, 0xf4, 0x16, 0x94, 0x63, 0x77, 0x97, 0x2c, 0xb4, + 0x5a, 0xe2, 0xcd, 0xec, 0x01, 0xbc, 0x7f, 0xd7, 0x25, 0x01, 0x9c, 0xd0, 0xb2, 0xbf, 0x5a, 0x00, + 0x48, 0x42, 0x08, 0xf4, 0xfb, 0xc4, 0xc5, 0xae, 0xd7, 0x94, 0xcb, 0x19, 0xaf, 0x29, 0x28, 0x21, + 0x98, 0xf1, 0x94, 0xa2, 0x86, 0xa9, 0x38, 0xd0, 0x30, 0x95, 0x8e, 0x33, 0x4c, 0x4b, 0x30, 0x93, + 0x84, 0x40, 0x30, 0xe3, 0xc1, 0x30, 0x21, 0x65, 0x3d, 0x0d, 0xc4, 0xdd, 0xf8, 0x36, 0x81, 0x4b, + 0x32, 0x32, 0xa7, 0xbc, 0x6b, 0x98, 0x49, 0xe6, 0x31, 0x32, 0x57, 0x27, 0xcf, 0x45, 0x85, 0xdc, + 0xe7, 0xa2, 0x1f, 0xb7, 0xe0, 0x6c, 0xba, 0x1d, 0xe6, 0xfb, 0xf6, 0x15, 0x0b, 0xce, 0xb1, 0x47, + 0x33, 0xd6, 0x6a, 0xf7, 0x13, 0xdd, 0x8b, 0xd9, 0xa1, 0x21, 0x7a, 0xf7, 0x38, 0xf1, 0x7b, 0x5e, + 0xcd, 0x22, 0x8d, 0xb3, 0x5b, 0xb4, 0xbf, 0x62, 0xc1, 0xf9, 0xdc, 0x84, 0x44, 0xe8, 0x0a, 0x8c, + 0x3a, 0x6d, 0x97, 0x6b, 0xa4, 0xc4, 0x7e, 0x67, 0xd2, 0x63, 0xbd, 0xc6, 0xf5, 0x51, 0x0a, 0xaa, + 0x12, 0x25, 0x16, 0x72, 0x13, 0x25, 0xf6, 0xcd, 0x7b, 0x68, 0x7f, 0xbf, 0x05, 0xc2, 0xdd, 0x69, + 0x80, 0x43, 0xe6, 0x6d, 0x99, 0x67, 0xd6, 0x08, 0x8a, 0x7e, 0x29, 0xdf, 0xff, 0x4b, 0x84, 0x42, + 0x57, 0x97, 0xba, 0x11, 0x00, 0xdd, 0xa0, 0x65, 0xb7, 0x40, 0x40, 0xab, 0x84, 0xe9, 0xac, 0xfa, + 0xf7, 0xe6, 0x1a, 0x40, 0x8b, 0xe1, 0x6a, 0xd9, 0x26, 0xd5, 0x15, 0x52, 0x55, 0x10, 0xac, 0x61, + 0xd9, 0x3f, 0x54, 0x80, 0x31, 0x19, 0x84, 0xbb, 0xe3, 0x0f, 0x22, 0x59, 0x1e, 0x2b, 0x2b, 0x0f, + 0x4b, 0xcf, 0x4a, 0x09, 0xd7, 0x13, 0x81, 0x3c, 0x49, 0xcf, 0x2a, 0x01, 0x38, 0xc1, 0x41, 0x4f, + 0xc3, 0x48, 0xd4, 0xd9, 0x60, 0xe8, 0x29, 0x27, 0x9e, 0x06, 0x2f, 0xc6, 0x12, 0x8e, 0x3e, 0x07, + 0xd3, 0xbc, 0x5e, 0x18, 0xb4, 0x9d, 0x2d, 0xae, 0xfe, 0x1c, 0x52, 0x5e, 0xb5, 0xd3, 0xab, 0x29, + 0xd8, 0xd1, 0x41, 0xe5, 0x6c, 0xba, 0x8c, 0x29, 0xce, 0xbb, 0xa8, 0xd8, 0x5f, 0x02, 0xd4, 0x1d, + 0x57, 0x1c, 0xbd, 0xc1, 0x4d, 0xa9, 0xdc, 0x90, 0xb4, 0x7a, 0x69, 0xc4, 0x75, 0x27, 0x50, 0x69, + 0x48, 0xcf, 0x6b, 0x61, 0x55, 0xdf, 0xfe, 0x9b, 0x45, 0x98, 0x4e, 0xbb, 0x04, 0xa2, 0x1b, 0x30, + 0xcc, 0x2f, 0x3b, 0x41, 0xbe, 0xc7, 0x83, 0xab, 0xe6, 0x48, 0xc8, 0xb6, 0xbd, 0xb8, 0x2f, 0x45, + 0x7d, 0xf4, 0x0e, 0x8c, 0xb5, 0x82, 0x7b, 0xfe, 0x3d, 0x27, 0x6c, 0x2d, 0xd4, 0x6b, 0x62, 0x5d, + 0x66, 0xf2, 0xcc, 0xd5, 0x04, 0x4d, 0x77, 0x4e, 0x64, 0x8f, 0x0b, 0x09, 0x08, 0xeb, 0xe4, 0xd0, + 0x3a, 0x8b, 0x95, 0xb8, 0xe9, 0x6e, 0xad, 0x3a, 0xed, 0x5e, 0x76, 0xb5, 0x4b, 0x12, 0x49, 0xa3, + 0x3c, 0x21, 0x02, 0x2a, 0x72, 0x00, 0x4e, 0x08, 0xa1, 0xef, 0x86, 0x33, 0x51, 0x8e, 0x9a, 0x2d, + 0x2f, 0xcd, 0x44, 0x2f, 0xcd, 0xd3, 0xe2, 0xa3, 0x54, 0x9a, 0xc9, 0x52, 0xc8, 0x65, 0x35, 0x63, + 0x7f, 0xf9, 0x0c, 0x18, 0xbb, 0xd1, 0xc8, 0x3a, 0x64, 0x9d, 0x50, 0xd6, 0x21, 0x0c, 0xa3, 0x64, + 0xb7, 0x1d, 0xef, 0x57, 0xdd, 0xb0, 0x57, 0x56, 0xbc, 0x65, 0x81, 0xd3, 0x4d, 0x53, 0x42, 0xb0, + 0xa2, 0x93, 0x9d, 0x1a, 0xaa, 0xf8, 0x21, 0xa6, 0x86, 0x2a, 0x9d, 0x62, 0x6a, 0xa8, 0x35, 0x18, + 0xd9, 0x72, 0x63, 0x4c, 0xda, 0x81, 0x60, 0x33, 0x33, 0xd7, 0xe1, 0x75, 0x8e, 0xd2, 0x9d, 0x84, + 0x44, 0x00, 0xb0, 0x24, 0x82, 0xde, 0x50, 0x3b, 0x70, 0x38, 0x5f, 0x4a, 0xeb, 0x7e, 0x19, 0xcc, + 0xdc, 0x83, 0x22, 0x01, 0xd4, 0xc8, 0x83, 0x26, 0x80, 0x5a, 0x91, 0x69, 0x9b, 0x46, 0xf3, 0x8d, + 0xe0, 0x59, 0x56, 0xa6, 0x3e, 0xc9, 0x9a, 0xee, 0xea, 
0xa9, 0xae, 0xca, 0xf9, 0x27, 0x81, 0xca, + 0x62, 0x35, 0x60, 0x82, 0xab, 0xef, 0xb7, 0xe0, 0x5c, 0x3b, 0x2b, 0xeb, 0x9b, 0x48, 0xb6, 0xf4, + 0xd2, 0xc0, 0x69, 0xed, 0x8c, 0x06, 0x99, 0xb8, 0x9e, 0x89, 0x86, 0xb3, 0x9b, 0xa3, 0x03, 0x1d, + 0x6e, 0xb4, 0x44, 0x86, 0xa6, 0x27, 0x73, 0x32, 0x65, 0xf5, 0xc8, 0x8f, 0xb5, 0x9e, 0x91, 0x95, + 0xe9, 0xe3, 0x79, 0x59, 0x99, 0x06, 0xce, 0xc5, 0xf4, 0x86, 0xca, 0x91, 0x35, 0x91, 0xbf, 0x94, + 0x78, 0x06, 0xac, 0xbe, 0x99, 0xb1, 0xde, 0x50, 0x99, 0xb1, 0x7a, 0xc4, 0x8c, 0xe3, 0x79, 0xaf, + 0xfa, 0xe6, 0xc3, 0xd2, 0x72, 0x5a, 0x4d, 0x9d, 0x4c, 0x4e, 0x2b, 0xe3, 0xaa, 0xe1, 0x69, 0x95, + 0x9e, 0xe9, 0x73, 0xd5, 0x18, 0x74, 0x7b, 0x5f, 0x36, 0x3c, 0x7f, 0xd7, 0xcc, 0x03, 0xe5, 0xef, + 0xba, 0xab, 0xe7, 0xc3, 0x42, 0x7d, 0x12, 0x3e, 0x51, 0xa4, 0x01, 0xb3, 0x60, 0xdd, 0xd5, 0x2f, + 0xc0, 0x33, 0xf9, 0x74, 0xd5, 0x3d, 0xd7, 0x4d, 0x37, 0xf3, 0x0a, 0xec, 0xca, 0xae, 0x75, 0xf6, + 0x74, 0xb2, 0x6b, 0x9d, 0x3b, 0xf1, 0xec, 0x5a, 0x8f, 0x9c, 0x42, 0x76, 0xad, 0x47, 0x3f, 0xd4, + 0xec, 0x5a, 0xb3, 0x0f, 0x21, 0xbb, 0xd6, 0x5a, 0x92, 0x5d, 0xeb, 0x7c, 0xfe, 0x94, 0x64, 0x58, + 0xe6, 0xe6, 0xe4, 0xd4, 0xba, 0xcb, 0x9e, 0xe7, 0x79, 0xcc, 0x0a, 0x11, 0xd4, 0x2e, 0x3b, 0x93, + 0x70, 0x56, 0x60, 0x0b, 0x3e, 0x25, 0x0a, 0x84, 0x13, 0x52, 0x94, 0x6e, 0x92, 0x63, 0xeb, 0xb1, + 0x1e, 0x0a, 0xd9, 0x2c, 0x55, 0x57, 0x7e, 0x66, 0x2d, 0xfb, 0xaf, 0x17, 0xe0, 0x62, 0xef, 0x75, + 0x9d, 0xe8, 0xc9, 0xea, 0xc9, 0xbb, 0x4e, 0x4a, 0x4f, 0xc6, 0x85, 0x9c, 0x04, 0x6b, 0xe0, 0xc0, + 0x3e, 0xd7, 0x61, 0x46, 0x99, 0xe4, 0x7a, 0x6e, 0x73, 0x5f, 0xcb, 0x30, 0xac, 0x5c, 0x0f, 0x1b, + 0x69, 0x04, 0xdc, 0x5d, 0x07, 0x2d, 0xc0, 0x94, 0x51, 0x58, 0xab, 0x0a, 0x61, 0x46, 0x29, 0xe6, + 0x1a, 0x26, 0x18, 0xa7, 0xf1, 0xed, 0x9f, 0xb6, 0xe0, 0xd1, 0x9c, 0xc4, 0x13, 0x03, 0xc7, 0xad, + 0xd9, 0x84, 0xa9, 0xb6, 0x59, 0xb5, 0x4f, 0x78, 0x2b, 0x23, 0xbd, 0x85, 0xea, 0x6b, 0x0a, 0x80, + 0xd3, 0x44, 0x17, 0xaf, 0xfc, 0xe6, 0xef, 0x5d, 0xfc, 0xd8, 0x6f, 0xfd, 0xde, 0xc5, 0x8f, 0xfd, + 0xf6, 0xef, 0x5d, 0xfc, 0xd8, 0x5f, 0x3e, 0xbc, 0x68, 0xfd, 0xe6, 0xe1, 0x45, 0xeb, 0xb7, 0x0e, + 0x2f, 0x5a, 0xbf, 0x7d, 0x78, 0xd1, 0xfa, 0xdd, 0xc3, 0x8b, 0xd6, 0x57, 0x7f, 0xff, 0xe2, 0xc7, + 0xde, 0x2e, 0xec, 0x3d, 0xff, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x84, 0x97, 0x9c, 0xb4, 0x50, + 0xe8, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index f76251d524ec4..b13a2db72f584 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -31,7 +31,7 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; option go_package = "v1"; // Represents a Persistent Disk resource in AWS. -// +// // An AWS EBS disk must exist before mounting to a container. The disk // must also be in the same AWS zone as the kubelet. An AWS EBS disk // can only be mounted as read/write once. AWS EBS volumes support @@ -198,7 +198,7 @@ message CSIPersistentVolumeSource { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. 
// +optional optional SecretReference controllerPublishSecretRef = 6; @@ -206,7 +206,7 @@ message CSIPersistentVolumeSource { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodeStageSecretRef = 7; @@ -214,7 +214,7 @@ message CSIPersistentVolumeSource { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodePublishSecretRef = 8; @@ -436,7 +436,7 @@ message ConfigMap { // ConfigMapEnvSource selects a ConfigMap to populate the environment // variables with. -// +// // The contents of the target ConfigMap's Data field will represent the // key-value pairs as environment variables. message ConfigMapEnvSource { @@ -497,7 +497,7 @@ message ConfigMapNodeConfigSource { } // Adapts a ConfigMap into a projected volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names, // unless the items element is populated with specific mappings of keys to paths. @@ -522,7 +522,7 @@ message ConfigMapProjection { } // Adapts a ConfigMap into a volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // volume as files using the keys in the Data field as the file names, unless // the items element is populated with specific mappings of keys to paths. @@ -606,6 +606,9 @@ message Container { // +optional // +patchMergeKey=containerPort // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. @@ -638,7 +641,7 @@ message Container { repeated VolumeMount volumeMounts = 9; // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -1314,7 +1317,7 @@ message FlockerVolumeSource { } // Represents a Persistent Disk resource in Google Compute Engine. -// +// // A GCE PD must exist before mounting to a container. The disk must // also be in the same GCE project and zone as the kubelet. A GCE PD // can only be mounted as read/write once or read-only many times. GCE @@ -1350,7 +1353,7 @@ message GCEPersistentDiskVolumeSource { // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. -// +// // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. 
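The GitRepo deprecation note in the hunk above describes its replacement pattern: clone the repository from an init container into an EmptyDir volume and mount that same volume into the pod's container. A minimal sketch of that pattern in Go against k8s.io/api/core/v1 follows; the pod name, images, repository URL handling, and mount paths are illustrative assumptions, not part of this change.

package podexample

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// gitRepoReplacement builds a Pod in which an init container clones a git
// repository into an EmptyDir volume, and the main container mounts the same
// volume — the pattern the GitRepoVolumeSource deprecation comment recommends.
func gitRepoReplacement(repoURL string) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "git-clone-example"}, // illustrative name
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{{
				Name:         "repo",
				VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
			}},
			InitContainers: []corev1.Container{{
				Name:         "clone",
				Image:        "alpine/git", // any image with a git binary would do
				Args:         []string{"clone", "--single-branch", repoURL, "/repo"},
				VolumeMounts: []corev1.VolumeMount{{Name: "repo", MountPath: "/repo"}},
			}},
			Containers: []corev1.Container{{
				Name:         "app",
				Image:        "busybox", // placeholder workload image
				Command:      []string{"ls", "/repo"},
				VolumeMounts: []corev1.VolumeMount{{Name: "repo", MountPath: "/repo"}},
			}},
			RestartPolicy: corev1.RestartPolicyNever,
		},
	}
}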
@@ -1370,6 +1373,30 @@ message GitRepoVolumeSource { optional string directory = 3; } +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +message GlusterfsPersistentVolumeSource { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + optional string endpoints = 1; + + // Path is the Glusterfs volume path. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + optional string path = 2; + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + optional bool readOnly = 3; + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + optional string endpointsNamespace = 4; +} + // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { @@ -2293,7 +2320,7 @@ message PersistentVolumeClaimSpec { // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional optional string volumeMode = 6; @@ -2386,7 +2413,7 @@ message PersistentVolumeSource { // exposed to the pod. Provisioned by an admin. // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional - optional GlusterfsVolumeSource glusterfs = 4; + optional GlusterfsPersistentVolumeSource glusterfs = 4; // NFS represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs @@ -2509,7 +2536,7 @@ message PersistentVolumeSpec { // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional optional string volumeMode = 8; @@ -2899,11 +2926,11 @@ message PodSecurityContext { // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: - // + // // 1. The owning GID will be the FSGroup // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) // 3. The permission bits are OR'd with rw-rw---- - // + // // If unset, the Kubelet will not modify the ownership and permissions of any volume. // +optional optional int64 fsGroup = 5; @@ -3126,6 +3153,12 @@ message PodSpec { // This is an alpha feature and may change in the future. // +optional optional string runtimeClassName = 29; + + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // Optional: Defaults to true. 
+ // +optional + optional bool enableServiceLinks = 30; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3136,7 +3169,7 @@ message PodStatus { // The conditions array, the reason and message fields, and the individual container status // arrays contain more detail about the pod's status. // There are five possible phase values: - // + // // Pending: The pod has been accepted by the Kubernetes system, but one or more of the // container images has not been created. This includes time before being scheduled as // well as time spent downloading images over the network, which could take a while. @@ -3148,7 +3181,7 @@ message PodStatus { // by the system. // Unknown: For some reason the state of the pod could not be obtained, typically due to an // error in communicating with the host of the pod. - // + // // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase // +optional optional string phase = 1; @@ -3879,7 +3912,7 @@ message Secret { // SecretEnvSource selects a Secret to populate the environment // variables with. -// +// // The contents of the target Secret's Data field will represent the // key-value pairs as environment variables. message SecretEnvSource { @@ -3917,7 +3950,7 @@ message SecretList { } // Adapts a secret into a projected volume. -// +// // The contents of the target Secret's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names. // Note that this is identical to a secret volume source without the default @@ -3953,7 +3986,7 @@ message SecretReference { } // Adapts a Secret into a volume. -// +// // The contents of the target Secret's Data field will be presented in a volume // as files using the keys in the Data field as the file names. // Secret volumes support ownership management and SELinux relabeling. diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index d9a57bd06dc75..87f3f0c5babca 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -191,7 +191,7 @@ type PersistentVolumeSource struct { // exposed to the pod. Provisioned by an admin. // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` + Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional @@ -326,7 +326,7 @@ type PersistentVolumeSpec struct { MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. 
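With this change, PersistentVolumeSource.Glusterfs now carries the new GlusterfsPersistentVolumeSource, whose extra EndpointsNamespace field is absent from the pod-level GlusterfsVolumeSource. A minimal sketch of building a PersistentVolume against these updated vendored types follows; the volume name, endpoints name, path, capacity, and namespace are illustrative assumptions.

package pvexample

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// glusterfsPV builds a PersistentVolume whose source is the new
// GlusterfsPersistentVolumeSource, setting the optional EndpointsNamespace
// pointer that the older GlusterfsVolumeSource does not have.
func glusterfsPV() *corev1.PersistentVolume {
	ns := "gluster-system" // illustrative endpoints namespace
	return &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "gluster-pv"}, // illustrative name
		Spec: corev1.PersistentVolumeSpec{
			Capacity: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("5Gi"),
			},
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				Glusterfs: &corev1.GlusterfsPersistentVolumeSource{
					EndpointsName:      "glusterfs-cluster",
					Path:               "kube_vol",
					ReadOnly:           false,
					EndpointsNamespace: &ns,
				},
			},
		},
	}
}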
@@ -455,7 +455,7 @@ type PersistentVolumeClaimSpec struct { StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` // This field requires the VolumeSnapshotDataSource alpha feature gate to be @@ -636,6 +636,30 @@ type GlusterfsVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsPersistentVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` + + // Path is the Glusterfs volume path. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"` +} + // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { @@ -1640,7 +1664,7 @@ type CSIPersistentVolumeSource struct { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"` @@ -1648,7 +1672,7 @@ type CSIPersistentVolumeSource struct { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. 
// +optional NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"` @@ -1656,7 +1680,7 @@ type CSIPersistentVolumeSource struct { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` @@ -2060,6 +2084,9 @@ type Container struct { // +optional // +patchMergeKey=containerPort // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. // The keys defined within a source must be a C_IDENTIFIER. All invalid keys @@ -2087,7 +2114,7 @@ type Container struct { // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -2891,8 +2918,18 @@ type PodSpec struct { // This is an alpha feature and may change in the future. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // Optional: Defaults to true. + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` } +const ( + // The default value for enableServiceLinks attribute. + DefaultEnableServiceLinks = true +) + // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the // pod's hosts file. type HostAlias struct { @@ -3273,8 +3310,8 @@ type ReplicationControllerCondition struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicationController represents the configuration of a replication controller. @@ -4987,6 +5024,10 @@ const ( TLSCertKey = "tls.crt" // TLSPrivateKeyKey is the key for the private key field in a TLS secret. TLSPrivateKeyKey = "tls.key" + // SecretTypeBootstrapToken is used during the automated bootstrap process (first + // implemented by kubeadm). It stores tokens that are used to sign well known + // ConfigMaps. 
They are used for authn. + SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index c781e5452d9fc..13ea6d2265183 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -123,9 +123,9 @@ var map_CSIPersistentVolumeSource = map[string]string{ "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", "volumeAttributes": "Attributes of the volume to publish.", - "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -321,7 +321,7 @@ var map_Container = map[string]string{ "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", "volumeMounts": "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", - "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", @@ -695,6 +695,18 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { return map_GitRepoVolumeSource } +var map_GlusterfsPersistentVolumeSource = map[string]string{ + "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", +} + +func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_GlusterfsPersistentVolumeSource +} + var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", @@ -1210,7 +1222,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.", "dataSource": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. 
If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", } @@ -1288,7 +1300,7 @@ var map_PersistentVolumeSpec = map[string]string{ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", - "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.", + "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.", "nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", } @@ -1528,6 +1540,7 @@ var map_PodSpec = map[string]string{ "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md", "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.", + "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. 
Optional: Defaults to true.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1636,7 +1649,7 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { } var map_Probe = map[string]string{ - "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", @@ -2201,7 +2214,7 @@ func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string { } var map_TopologySelectorTerm = map[string]string{ - "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", + "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", "matchLabelExpressions": "A list of topology selector requirements by labels.", } @@ -2285,23 +2298,23 @@ var map_VolumeSource = map[string]string{ "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", - "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "configMap": "ConfigMap represents a configMap that should populate this volume", - "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - "projected": "Items for all in one resources secrets, configmaps, and downward API", - "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "ConfigMap represents a configMap that should populate this volume", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "Items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", } func (VolumeSource) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index f8f3471a5ed60..4219c95eb09d0 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -1498,6 +1498,27 @@ func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) { + *out = *in + if in.EndpointsNamespace != nil { + in, out := &in.EndpointsNamespace, &out.EndpointsNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource. +func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { *out = *in @@ -2806,8 +2827,8 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in + *out = new(GlusterfsPersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.NFS != nil { in, out := &in.NFS, &out.NFS @@ -3554,6 +3575,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(string) **out = **in } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go index 8b1a3e312de29..bd269c6d235bb 100644 --- a/vendor/k8s.io/api/events/v1beta1/doc.go +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:openapi-gen=true // +groupName=events.k8s.io + package v1beta1 // import "k8s.io/api/events/v1beta1" diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index e24a82ab18603..bb0c881b57122 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -254,24 +253,6 @@ func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/extensions/v1beta1/BUILD.bazel b/vendor/k8s.io/api/extensions/v1beta1/BUILD.bazel index 393fbfaec16bc..c6d03831c3a69 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/BUILD.bazel +++ b/vendor/k8s.io/api/extensions/v1beta1/BUILD.bazel @@ -18,7 +18,6 @@ go_library( "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 72d64db3edd18..a0dfa96620dba 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -27,10 +26,6 @@ limitations under the License. It has these top-level messages: AllowedFlexVolume AllowedHostPath - CustomMetricCurrentStatus - CustomMetricCurrentStatusList - CustomMetricTarget - CustomMetricTargetList DaemonSet DaemonSetCondition DaemonSetList @@ -77,6 +72,7 @@ limitations under the License. 
RollbackConfig RollingUpdateDaemonSet RollingUpdateDeployment + RunAsGroupStrategyOptions RunAsUserStrategyOptions SELinuxStrategyOptions Scale @@ -91,7 +87,6 @@ import fmt "fmt" import math "math" import k8s_io_api_core_v1 "k8s.io/api/core/v1" - import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" @@ -122,255 +117,235 @@ func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } func (*AllowedHostPath) ProtoMessage() {} func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } -func (*CustomMetricCurrentStatus) ProtoMessage() {} -func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } -func (*CustomMetricCurrentStatusList) ProtoMessage() {} -func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - -func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } -func (*CustomMetricTarget) ProtoMessage() {} -func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } -func (*CustomMetricTargetList) ProtoMessage() {} -func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} -} +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func 
(m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{23} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} + return fileDescriptorGenerated, []int{30} } func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{35} + return fileDescriptorGenerated, []int{31} } func (m *NetworkPolicyList) Reset() { *m 
= NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{47} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} + return fileDescriptorGenerated, []int{44} } func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} + return fileDescriptorGenerated, []int{47} +} + +func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } +func (*RunAsGroupStrategyOptions) ProtoMessage() {} +func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{48} } func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{52} + return fileDescriptorGenerated, []int{49} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{57} + return fileDescriptorGenerated, []int{54} } func init() { proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") proto.RegisterType((*AllowedHostPath)(nil), 
"k8s.io.api.extensions.v1beta1.AllowedHostPath") - proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus") - proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList") - proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget") - proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") @@ -417,6 +392,7 @@ func init() { proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") @@ -476,126 +452,6 @@ func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *CustomMetricCurrentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricCurrentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n1, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil -} - -func (m *CustomMetricCurrentStatusList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricCurrentStatusList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CustomMetricTarget) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricTarget) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *CustomMetricTargetList) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricTargetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -614,27 +470,27 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n1 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) + n2, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n2 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) + n3, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n3 return i, nil } @@ -664,11 +520,11 @@ func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n4, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n4 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -698,11 +554,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n7, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -737,28 +593,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) + n6, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n6 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n9, err := m.Template.MarshalTo(dAtA[i:]) + n7, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n7 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n8, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n8 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -855,11 +711,11 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n9, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n9 } return i, nil } @@ -882,27 +738,27 @@ func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) + n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n10 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) + n11, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n11 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) + n12, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n12 return i, nil } @@ -940,19 +796,19 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + n13, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n13 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n14, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n14 return i, nil } @@ -974,11 +830,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n17, err := m.ListMeta.MarshalTo(dAtA[i:]) + n15, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n15 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1038,11 +894,11 @@ func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n16, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n16 return i, nil } @@ -1070,28 +926,28 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) + n17, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n17 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) + n18, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n18 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) + n19, err := m.Strategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n19 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -1112,11 +968,11 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n20, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n20 } if m.ProgressDeadlineSeconds != nil { dAtA[i] = 0x48 @@ -1202,11 +1058,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n21, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n21 } return i, nil } @@ -1267,11 +1123,11 @@ func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { 
dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n24, err := m.Backend.MarshalTo(dAtA[i:]) + n22, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n22 return i, nil } @@ -1408,27 +1264,27 @@ func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n25, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n23 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n26, err := m.Spec.MarshalTo(dAtA[i:]) + n24, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n24 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n27, err := m.Status.MarshalTo(dAtA[i:]) + n25, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n25 return i, nil } @@ -1454,11 +1310,11 @@ func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n28, err := m.ServicePort.MarshalTo(dAtA[i:]) + n26, err := m.ServicePort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n26 return i, nil } @@ -1480,11 +1336,11 @@ func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n29, err := m.ListMeta.MarshalTo(dAtA[i:]) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n27 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1522,11 +1378,11 @@ func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n30, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) + n28, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n28 return i, nil } @@ -1549,11 +1405,11 @@ func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n31, err := m.HTTP.MarshalTo(dAtA[i:]) + n29, err := m.HTTP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n29 } return i, nil } @@ -1577,11 +1433,11 @@ func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n32, err := m.Backend.MarshalTo(dAtA[i:]) + n30, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n30 } if len(m.TLS) > 0 { for _, msg := range m.TLS { @@ -1628,11 +1484,11 @@ func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n33, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n31, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n31 return i, nil } @@ -1691,19 +1547,19 @@ func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n34, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n32, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n32 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n35, err := m.Spec.MarshalTo(dAtA[i:]) + n33, err := m.Spec.MarshalTo(dAtA[i:]) if err != 
nil { return 0, err } - i += n35 + i += n33 return i, nil } @@ -1809,11 +1665,11 @@ func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n36, err := m.ListMeta.MarshalTo(dAtA[i:]) + n34, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n34 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1848,31 +1704,31 @@ func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n37, err := m.PodSelector.MarshalTo(dAtA[i:]) + n35, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n35 } if m.NamespaceSelector != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n38, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + n36, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n36 } if m.IPBlock != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n39, err := m.IPBlock.MarshalTo(dAtA[i:]) + n37, err := m.IPBlock.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n37 } return i, nil } @@ -1902,11 +1758,11 @@ func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n40, err := m.Port.MarshalTo(dAtA[i:]) + n38, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n38 } return i, nil } @@ -1929,11 +1785,11 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n41, err := m.PodSelector.MarshalTo(dAtA[i:]) + n39, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n39 if len(m.Ingress) > 0 { for _, msg := range m.Ingress { dAtA[i] = 0x12 @@ -1994,19 +1850,19 @@ func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n42, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n40, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n40 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n43, err := m.Spec.MarshalTo(dAtA[i:]) + n41, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n41 return i, nil } @@ -2028,11 +1884,11 @@ func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n44, err := m.ListMeta.MarshalTo(dAtA[i:]) + n42, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n42 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2170,35 +2026,35 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n45, err := m.SELinux.MarshalTo(dAtA[i:]) + n43, err := m.SELinux.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n43 dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n46, err := m.RunAsUser.MarshalTo(dAtA[i:]) + n44, err := m.RunAsUser.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n44 dAtA[i] = 0x62 i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n47, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) + n45, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n45 dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n48, err := m.FSGroup.MarshalTo(dAtA[i:]) + n46, err := m.FSGroup.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n46 dAtA[i] = 0x70 i++ if m.ReadOnlyRootFilesystem { @@ -2308,6 +2164,18 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.RunAsGroup != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) + n47, err := m.RunAsGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } return i, nil } @@ -2329,27 +2197,27 @@ func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n48, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n48 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n50, err := m.Spec.MarshalTo(dAtA[i:]) + n49, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n49 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n51, err := m.Status.MarshalTo(dAtA[i:]) + n50, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n50 return i, nil } @@ -2379,11 +2247,11 @@ func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n52, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n51, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n51 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -2413,11 +2281,11 @@ func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n53, err := m.ListMeta.MarshalTo(dAtA[i:]) + n52, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n52 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2457,20 +2325,20 @@ func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n54, err := m.Selector.MarshalTo(dAtA[i:]) + n53, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n53 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n55, err := m.Template.MarshalTo(dAtA[i:]) + n54, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n54 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -2580,11 +2448,11 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n55, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n55 } return i, nil } @@ -2608,21 +2476,55 @@ func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n57, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n56 } if m.MaxSurge != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n58, err := m.MaxSurge.MarshalTo(dAtA[i:]) + n57, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n57 + } + return i, nil +} + +func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } return i, nil } @@ -2684,11 +2586,11 @@ func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n59, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n58, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n58 } return i, nil } @@ -2711,27 +2613,27 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n60, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n59, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n59 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n61, err := m.Spec.MarshalTo(dAtA[i:]) + n60, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n60 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n62, err := m.Status.MarshalTo(dAtA[i:]) + n61, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n62 + i += n61 return i, nil } @@ -2837,24 +2739,6 @@ func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2881,63 +2765,19 @@ func (m *AllowedHostPath) Size() (n int) { return n } -func (m *CustomMetricCurrentStatus) Size() (n int) { +func (m *DaemonSet) Size() (n int) { var l int _ = l - l = len(m.Name) + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentValue.Size() + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m 
*CustomMetricCurrentStatusList) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CustomMetricTarget) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CustomMetricTargetList) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DaemonSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DaemonSetCondition) Size() (n int) { +func (m *DaemonSetCondition) Size() (n int) { var l int _ = l l = len(m.Type) @@ -3535,6 +3375,10 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -3650,6 +3494,20 @@ func (m *RollingUpdateDeployment) Size() (n int) { return n } +func (m *RunAsGroupStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *RunAsUserStrategyOptions) Size() (n int) { var l int _ = l @@ -3760,48 +3618,6 @@ func (this *AllowedHostPath) String() string { }, "") return s } -func (this *CustomMetricCurrentStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomMetricCurrentStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CustomMetricCurrentStatusList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomMetricCurrentStatusList{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricCurrentStatus", "CustomMetricCurrentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CustomMetricTarget) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomMetricTarget{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CustomMetricTargetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomMetricTargetList{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricTarget", "CustomMetricTarget", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *DaemonSet) String() string { if this == nil { return "nil" @@ -4271,6 +4087,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, + 
`RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, `}`, }, "") return s @@ -4380,6 +4197,17 @@ func (this *RollingUpdateDeployment) String() string { }, "") return s } +func (this *RunAsGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" @@ -4597,385 +4425,16 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - 
} - if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatusList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricCurrentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - var msglen int + m.PathPrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4985,23 +4444,12 @@ func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricTarget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6597,51 +6045,14 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if 
postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6651,41 +6062,80 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue } + m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -10495,6 +9945,39 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11609,6 +11092,116 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } +func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -12114,51 +11707,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12168,41 +11724,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -12474,235 +12069,230 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 3665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 
0xcd, 0x6f, 0x24, 0x49, - 0x56, 0xef, 0xac, 0x2a, 0xbb, 0xca, 0xcf, 0xed, 0xaf, 0xb0, 0xdb, 0x5d, 0xdb, 0x33, 0xed, 0xea, - 0xcd, 0x91, 0x9a, 0x9e, 0xa1, 0xa7, 0x6a, 0xba, 0xe7, 0x63, 0x87, 0x69, 0xb1, 0xbb, 0x2e, 0xbb, - 0xdd, 0xed, 0xc5, 0x1f, 0x35, 0x51, 0x76, 0xb3, 0x8c, 0x98, 0x65, 0xd2, 0x55, 0xe1, 0x72, 0x8e, - 0xb3, 0x32, 0x73, 0x33, 0x22, 0xbd, 0x2e, 0x09, 0x21, 0x0e, 0x08, 0x09, 0x09, 0x04, 0x1c, 0x96, - 0x0f, 0x71, 0x61, 0x2f, 0x9c, 0x40, 0x70, 0x83, 0xc3, 0x6a, 0x24, 0xa4, 0x45, 0x1a, 0xa1, 0x45, - 0xda, 0x1b, 0x7b, 0xb2, 0x18, 0xcf, 0x09, 0xf1, 0x0f, 0xa0, 0x3e, 0x20, 0x14, 0x91, 0x91, 0xdf, - 0x99, 0xae, 0x2a, 0x4f, 0xb7, 0x85, 0xd0, 0xde, 0x2a, 0xe3, 0xbd, 0xf7, 0x7b, 0x2f, 0x22, 0x5e, - 0xbc, 0xf7, 0xe2, 0xa3, 0x60, 0xe3, 0xf8, 0x7d, 0x5a, 0xd7, 0xad, 0xc6, 0xb1, 0x7b, 0x40, 0x1c, - 0x93, 0x30, 0x42, 0x1b, 0x27, 0xc4, 0xec, 0x5a, 0x4e, 0x43, 0x12, 0x34, 0x5b, 0x6f, 0x90, 0x53, - 0x46, 0x4c, 0xaa, 0x5b, 0x26, 0x6d, 0x9c, 0x3c, 0x38, 0x20, 0x4c, 0x7b, 0xd0, 0xe8, 0x11, 0x93, - 0x38, 0x1a, 0x23, 0xdd, 0xba, 0xed, 0x58, 0xcc, 0x42, 0xb7, 0x3d, 0xf6, 0xba, 0x66, 0xeb, 0xf5, - 0x90, 0xbd, 0x2e, 0xd9, 0x6f, 0xbd, 0xd9, 0xd3, 0xd9, 0x91, 0x7b, 0x50, 0xef, 0x58, 0xfd, 0x46, - 0xcf, 0xea, 0x59, 0x0d, 0x21, 0x75, 0xe0, 0x1e, 0x8a, 0x2f, 0xf1, 0x21, 0x7e, 0x79, 0x68, 0xb7, - 0xd4, 0x88, 0xf2, 0x8e, 0xe5, 0x90, 0xc6, 0x49, 0x4a, 0xe3, 0xad, 0x77, 0x42, 0x9e, 0xbe, 0xd6, - 0x39, 0xd2, 0x4d, 0xe2, 0x0c, 0x1a, 0xf6, 0x71, 0x4f, 0x08, 0x39, 0x84, 0x5a, 0xae, 0xd3, 0x21, - 0x63, 0x49, 0xd1, 0x46, 0x9f, 0x30, 0x2d, 0x4b, 0x57, 0x23, 0x4f, 0xca, 0x71, 0x4d, 0xa6, 0xf7, - 0xd3, 0x6a, 0xde, 0x1b, 0x26, 0x40, 0x3b, 0x47, 0xa4, 0xaf, 0xa5, 0xe4, 0xde, 0xce, 0x93, 0x73, - 0x99, 0x6e, 0x34, 0x74, 0x93, 0x51, 0xe6, 0x24, 0x85, 0xd4, 0x47, 0xb0, 0xb0, 0x6a, 0x18, 0xd6, - 0x0f, 0x48, 0x77, 0xc3, 0x20, 0xa7, 0xcf, 0x2c, 0xc3, 0xed, 0x13, 0x74, 0x17, 0x26, 0xbb, 0x8e, - 0x7e, 0x42, 0x9c, 0xaa, 0x72, 0x47, 0xb9, 0x37, 0xd5, 0x9c, 0xfd, 0xfc, 0xac, 0x76, 0xed, 0xfc, - 0xac, 0x36, 0xb9, 0x2e, 0x5a, 0xb1, 0xa4, 0xaa, 0x14, 0xe6, 0xa4, 0xf0, 0x53, 0x8b, 0xb2, 0x96, - 0xc6, 0x8e, 0xd0, 0x43, 0x00, 0x5b, 0x63, 0x47, 0x2d, 0x87, 0x1c, 0xea, 0xa7, 0x52, 0x1c, 0x49, - 0x71, 0x68, 0x05, 0x14, 0x1c, 0xe1, 0x42, 0xf7, 0xa1, 0xe2, 0x10, 0xad, 0xbb, 0x6b, 0x1a, 0x83, - 0x6a, 0xe1, 0x8e, 0x72, 0xaf, 0xd2, 0x9c, 0x97, 0x12, 0x15, 0x2c, 0xdb, 0x71, 0xc0, 0xa1, 0xfe, - 0xa5, 0x02, 0x5f, 0x5b, 0x73, 0x29, 0xb3, 0xfa, 0xdb, 0x84, 0x39, 0x7a, 0x67, 0xcd, 0x75, 0x1c, - 0x62, 0xb2, 0x36, 0xd3, 0x98, 0x4b, 0xd1, 0x1d, 0x28, 0x99, 0x5a, 0x9f, 0x48, 0xcd, 0xd7, 0x25, - 0x4e, 0x69, 0x47, 0xeb, 0x13, 0x2c, 0x28, 0xe8, 0x23, 0x98, 0x38, 0xd1, 0x0c, 0x97, 0x08, 0x55, - 0xd3, 0x0f, 0xeb, 0xf5, 0xd0, 0xfb, 0x82, 0x61, 0xab, 0xdb, 0xc7, 0x3d, 0xe1, 0x8e, 0xbe, 0x2f, - 0xd4, 0x3f, 0x74, 0x35, 0x93, 0xe9, 0x6c, 0xd0, 0x5c, 0x92, 0x90, 0xd7, 0xa5, 0xde, 0x67, 0x1c, - 0x0b, 0x7b, 0x90, 0xea, 0xef, 0xc0, 0xed, 0x5c, 0xd3, 0xb6, 0x74, 0xca, 0xd0, 0xc7, 0x30, 0xa1, - 0x33, 0xd2, 0xa7, 0x55, 0xe5, 0x4e, 0xf1, 0xde, 0xf4, 0xc3, 0xf7, 0xeb, 0x17, 0xba, 0x7e, 0x3d, - 0x17, 0xac, 0x39, 0x23, 0xcd, 0x98, 0xd8, 0xe4, 0x70, 0xd8, 0x43, 0x55, 0xff, 0x54, 0x01, 0x14, - 0x95, 0xd9, 0xd3, 0x9c, 0x1e, 0x61, 0x23, 0x0c, 0xca, 0x6f, 0x7c, 0xb5, 0x41, 0x59, 0x94, 0x90, - 0xd3, 0x9e, 0xc2, 0xd8, 0x98, 0xd8, 0xb0, 0x9c, 0x36, 0x49, 0x0c, 0xc6, 0xb3, 0xf8, 0x60, 0x3c, - 0x18, 0x63, 0x30, 0x3c, 0x94, 0x9c, 0x51, 0xf8, 0x61, 0x01, 0xa6, 0xd6, 0x35, 0xd2, 0xb7, 0xcc, - 0x36, 0x61, 0xe8, 0x13, 0xa8, 0xf0, 0xa5, 0xd9, 0xd5, 0x98, 0x26, 0x06, 0x60, 0xfa, 0xe1, 0x5b, - 
0x17, 0xf5, 0x8e, 0xd6, 0x39, 0x77, 0xfd, 0xe4, 0x41, 0x7d, 0xf7, 0xe0, 0x53, 0xd2, 0x61, 0xdb, - 0x84, 0x69, 0xa1, 0x07, 0x87, 0x6d, 0x38, 0x40, 0x45, 0x3b, 0x50, 0xa2, 0x36, 0xe9, 0xc8, 0xb1, - 0xbb, 0x3f, 0xa4, 0x1b, 0x81, 0x65, 0x6d, 0x9b, 0x74, 0xc2, 0xc9, 0xe0, 0x5f, 0x58, 0xe0, 0xa0, - 0x67, 0x30, 0x49, 0xc5, 0x2c, 0x57, 0x8b, 0xa9, 0xd9, 0xb8, 0x18, 0xd1, 0xf3, 0x8d, 0x60, 0xb9, - 0x7a, 0xdf, 0x58, 0xa2, 0xa9, 0xff, 0x59, 0x00, 0x14, 0xf0, 0xae, 0x59, 0x66, 0x57, 0x67, 0xba, - 0x65, 0xa2, 0x0f, 0xa0, 0xc4, 0x06, 0xb6, 0xef, 0x1d, 0x77, 0x7d, 0x83, 0xf6, 0x06, 0x36, 0x79, - 0x7e, 0x56, 0x5b, 0x4e, 0x4b, 0x70, 0x0a, 0x16, 0x32, 0x68, 0x2b, 0x30, 0xb5, 0x20, 0xa4, 0xdf, - 0x89, 0xab, 0x7e, 0x7e, 0x56, 0xcb, 0x08, 0xc7, 0xf5, 0x00, 0x29, 0x6e, 0x20, 0x3a, 0x01, 0x64, - 0x68, 0x94, 0xed, 0x39, 0x9a, 0x49, 0x3d, 0x4d, 0x7a, 0x9f, 0xc8, 0x41, 0x78, 0x63, 0xb4, 0x49, - 0xe3, 0x12, 0xcd, 0x5b, 0xd2, 0x0a, 0xb4, 0x95, 0x42, 0xc3, 0x19, 0x1a, 0x78, 0xbc, 0x73, 0x88, - 0x46, 0x2d, 0xb3, 0x5a, 0x8a, 0xc7, 0x3b, 0x2c, 0x5a, 0xb1, 0xa4, 0xa2, 0xd7, 0xa1, 0xdc, 0x27, - 0x94, 0x6a, 0x3d, 0x52, 0x9d, 0x10, 0x8c, 0x73, 0x92, 0xb1, 0xbc, 0xed, 0x35, 0x63, 0x9f, 0xae, - 0xfe, 0x58, 0x81, 0x99, 0x60, 0xe4, 0x84, 0xb7, 0xff, 0x66, 0xca, 0x0f, 0xeb, 0xa3, 0x75, 0x89, - 0x4b, 0x0b, 0x2f, 0x0c, 0xa2, 0xa2, 0xdf, 0x12, 0xf1, 0xc1, 0x6d, 0x7f, 0x2d, 0x15, 0xc4, 0x5a, - 0xba, 0x37, 0xaa, 0xcb, 0xe4, 0x2c, 0xa1, 0x3f, 0x2b, 0x45, 0xcc, 0xe7, 0xae, 0x89, 0x3e, 0x86, - 0x0a, 0x25, 0x06, 0xe9, 0x30, 0xcb, 0x91, 0xe6, 0xbf, 0x3d, 0xa2, 0xf9, 0xda, 0x01, 0x31, 0xda, - 0x52, 0xb4, 0x79, 0x9d, 0xdb, 0xef, 0x7f, 0xe1, 0x00, 0x12, 0x7d, 0x08, 0x15, 0x46, 0xfa, 0xb6, - 0xa1, 0x31, 0x3f, 0x06, 0xbd, 0x16, 0xed, 0x02, 0xf7, 0x1c, 0x0e, 0xd6, 0xb2, 0xba, 0x7b, 0x92, - 0x4d, 0x2c, 0x9f, 0x60, 0x48, 0xfc, 0x56, 0x1c, 0xc0, 0xa0, 0x13, 0x98, 0x75, 0xed, 0x2e, 0xe7, - 0x64, 0x3c, 0xe3, 0xf5, 0x06, 0xd2, 0x93, 0xde, 0x1b, 0x75, 0x6c, 0xf6, 0x63, 0xd2, 0xcd, 0x65, - 0xa9, 0x6b, 0x36, 0xde, 0x8e, 0x13, 0x5a, 0xd0, 0x2a, 0xcc, 0xf5, 0x75, 0x93, 0x67, 0xae, 0x41, - 0x9b, 0x74, 0x2c, 0xb3, 0x4b, 0x85, 0x5b, 0x4d, 0x34, 0x6f, 0x4a, 0x80, 0xb9, 0xed, 0x38, 0x19, - 0x27, 0xf9, 0xd1, 0x77, 0x00, 0xf9, 0xdd, 0x78, 0xe2, 0x25, 0x6c, 0xdd, 0x32, 0x85, 0xcf, 0x15, - 0x43, 0xe7, 0xde, 0x4b, 0x71, 0xe0, 0x0c, 0x29, 0xb4, 0x05, 0x4b, 0x0e, 0x39, 0xd1, 0x79, 0x1f, - 0x9f, 0xea, 0x94, 0x59, 0xce, 0x60, 0x4b, 0xef, 0xeb, 0xac, 0x3a, 0x29, 0x6c, 0xaa, 0x9e, 0x9f, - 0xd5, 0x96, 0x70, 0x06, 0x1d, 0x67, 0x4a, 0xa9, 0x7f, 0x3e, 0x09, 0x73, 0x89, 0x78, 0x83, 0x9e, - 0xc1, 0x72, 0xc7, 0x4b, 0x4e, 0x3b, 0x6e, 0xff, 0x80, 0x38, 0xed, 0xce, 0x11, 0xe9, 0xba, 0x06, - 0xe9, 0x0a, 0x47, 0x99, 0x68, 0xae, 0x48, 0x8b, 0x97, 0xd7, 0x32, 0xb9, 0x70, 0x8e, 0x34, 0x1f, - 0x05, 0x53, 0x34, 0x6d, 0xeb, 0x94, 0x06, 0x98, 0x05, 0x81, 0x19, 0x8c, 0xc2, 0x4e, 0x8a, 0x03, - 0x67, 0x48, 0x71, 0x1b, 0xbb, 0x84, 0xea, 0x0e, 0xe9, 0x26, 0x6d, 0x2c, 0xc6, 0x6d, 0x5c, 0xcf, - 0xe4, 0xc2, 0x39, 0xd2, 0xe8, 0x5d, 0x98, 0xf6, 0xb4, 0x89, 0xf9, 0x93, 0x13, 0x1d, 0xa4, 0xc3, - 0x9d, 0x90, 0x84, 0xa3, 0x7c, 0xbc, 0x6b, 0xd6, 0x01, 0x25, 0xce, 0x09, 0xe9, 0xe6, 0x4f, 0xf0, - 0x6e, 0x8a, 0x03, 0x67, 0x48, 0xf1, 0xae, 0x79, 0x1e, 0x98, 0xea, 0xda, 0x64, 0xbc, 0x6b, 0xfb, - 0x99, 0x5c, 0x38, 0x47, 0x9a, 0xfb, 0xb1, 0x67, 0xf2, 0xea, 0x89, 0xa6, 0x1b, 0xda, 0x81, 0x41, - 0xaa, 0xe5, 0xb8, 0x1f, 0xef, 0xc4, 0xc9, 0x38, 0xc9, 0x8f, 0x9e, 0xc0, 0x82, 0xd7, 0xb4, 0x6f, - 0x6a, 0x01, 0x48, 0x45, 0x80, 0x7c, 0x4d, 0x82, 0x2c, 0xec, 0x24, 0x19, 0x70, 0x5a, 0x06, 0x7d, - 0x00, 0xb3, 0x1d, 0xcb, 
0x30, 0x84, 0x3f, 0xae, 0x59, 0xae, 0xc9, 0xaa, 0x53, 0x02, 0x05, 0xf1, - 0xf5, 0xb8, 0x16, 0xa3, 0xe0, 0x04, 0x27, 0x22, 0x00, 0x1d, 0x3f, 0xe1, 0xd0, 0x2a, 0x8c, 0x54, - 0x6b, 0xa4, 0x93, 0x5e, 0x58, 0x03, 0x04, 0x4d, 0x14, 0x47, 0x80, 0xd5, 0x7f, 0x55, 0xe0, 0x66, - 0x4e, 0xe8, 0x40, 0xdf, 0x8a, 0xa5, 0xd8, 0x5f, 0x4e, 0xa4, 0xd8, 0x57, 0x72, 0xc4, 0x22, 0x79, - 0xd6, 0x84, 0x19, 0x87, 0xf7, 0xca, 0xec, 0x79, 0x2c, 0x32, 0x46, 0xbe, 0x3b, 0xa4, 0x1b, 0x38, - 0x2a, 0x13, 0xc6, 0xfc, 0x85, 0xf3, 0xb3, 0xda, 0x4c, 0x8c, 0x86, 0xe3, 0xf0, 0xea, 0x5f, 0x14, - 0x00, 0xd6, 0x89, 0x6d, 0x58, 0x83, 0x3e, 0x31, 0xaf, 0xa2, 0x86, 0xda, 0x8d, 0xd5, 0x50, 0x6f, - 0x0e, 0x9b, 0x9e, 0xc0, 0xb4, 0xdc, 0x22, 0xea, 0xd7, 0x13, 0x45, 0x54, 0x63, 0x74, 0xc8, 0x8b, - 0xab, 0xa8, 0x7f, 0x2f, 0xc2, 0x62, 0xc8, 0x1c, 0x96, 0x51, 0x8f, 0x62, 0x73, 0xfc, 0x4b, 0x89, - 0x39, 0xbe, 0x99, 0x21, 0xf2, 0xd2, 0xea, 0xa8, 0x17, 0x5f, 0xcf, 0xa0, 0x4f, 0x61, 0x96, 0x17, - 0x4e, 0x9e, 0x7b, 0x88, 0xb2, 0x6c, 0x72, 0xec, 0xb2, 0x2c, 0x48, 0xa0, 0x5b, 0x31, 0x24, 0x9c, - 0x40, 0xce, 0x29, 0x03, 0xcb, 0x2f, 0xbb, 0x0c, 0x54, 0x3f, 0x53, 0x60, 0x36, 0x9c, 0xa6, 0x2b, - 0x28, 0xda, 0x76, 0xe2, 0x45, 0xdb, 0xeb, 0x23, 0xbb, 0x68, 0x4e, 0xd5, 0xf6, 0xdf, 0xbc, 0xc0, - 0x0f, 0x98, 0xf8, 0x02, 0x3f, 0xd0, 0x3a, 0xc7, 0x23, 0x6c, 0xff, 0x7e, 0xa8, 0x00, 0x92, 0x59, - 0x60, 0xd5, 0x34, 0x2d, 0xa6, 0x79, 0xb1, 0xd2, 0x33, 0x6b, 0x73, 0x64, 0xb3, 0x7c, 0x8d, 0xf5, - 0xfd, 0x14, 0xd6, 0x63, 0x93, 0x39, 0x83, 0x70, 0x46, 0xd2, 0x0c, 0x38, 0xc3, 0x00, 0xa4, 0x01, - 0x38, 0x12, 0x73, 0xcf, 0x92, 0x0b, 0xf9, 0xcd, 0x11, 0x62, 0x1e, 0x17, 0x58, 0xb3, 0xcc, 0x43, - 0xbd, 0x17, 0x86, 0x1d, 0x1c, 0x00, 0xe1, 0x08, 0xe8, 0xad, 0xc7, 0x70, 0x33, 0xc7, 0x5a, 0x34, - 0x0f, 0xc5, 0x63, 0x32, 0xf0, 0x86, 0x0d, 0xf3, 0x9f, 0x68, 0x29, 0xba, 0x4d, 0x9e, 0x92, 0x3b, - 0xdc, 0x0f, 0x0a, 0xef, 0x2b, 0xea, 0x8f, 0x27, 0xa2, 0xbe, 0x23, 0x2a, 0xe6, 0x7b, 0x50, 0x71, - 0x88, 0x6d, 0xe8, 0x1d, 0x8d, 0xca, 0x42, 0xe8, 0xba, 0x77, 0xa4, 0xe1, 0xb5, 0xe1, 0x80, 0x1a, - 0xab, 0xad, 0x0b, 0x2f, 0xb7, 0xb6, 0x2e, 0xbe, 0x98, 0xda, 0xfa, 0xb7, 0xa0, 0x42, 0xfd, 0xaa, - 0xba, 0x24, 0x20, 0x1f, 0x8c, 0x11, 0x5f, 0x65, 0x41, 0x1d, 0x28, 0x08, 0x4a, 0xe9, 0x00, 0x34, - 0xab, 0x88, 0x9e, 0x18, 0xb3, 0x88, 0x7e, 0xa1, 0x85, 0x2f, 0x8f, 0xa9, 0xb6, 0xe6, 0x52, 0xd2, - 0x15, 0x81, 0xa8, 0x12, 0xc6, 0xd4, 0x96, 0x68, 0xc5, 0x92, 0x8a, 0x3e, 0x8e, 0xb9, 0x6c, 0xe5, - 0x32, 0x2e, 0x3b, 0x9b, 0xef, 0xae, 0x68, 0x1f, 0x6e, 0xda, 0x8e, 0xd5, 0x73, 0x08, 0xa5, 0xeb, - 0x44, 0xeb, 0x1a, 0xba, 0x49, 0xfc, 0xf1, 0xf1, 0x2a, 0xa2, 0x57, 0xce, 0xcf, 0x6a, 0x37, 0x5b, - 0xd9, 0x2c, 0x38, 0x4f, 0x56, 0xfd, 0xbc, 0x04, 0xf3, 0xc9, 0x0c, 0x98, 0x53, 0xa4, 0x2a, 0x97, - 0x2a, 0x52, 0xef, 0x47, 0x16, 0x83, 0x57, 0xc1, 0x47, 0xce, 0xf8, 0x52, 0x0b, 0x62, 0x15, 0xe6, - 0x64, 0x34, 0xf0, 0x89, 0xb2, 0x4c, 0x0f, 0x66, 0x7f, 0x3f, 0x4e, 0xc6, 0x49, 0x7e, 0x5e, 0x7a, - 0x86, 0x15, 0xa5, 0x0f, 0x52, 0x8a, 0x97, 0x9e, 0xab, 0x49, 0x06, 0x9c, 0x96, 0x41, 0xdb, 0xb0, - 0xe8, 0x9a, 0x69, 0x28, 0xcf, 0x1b, 0x5f, 0x91, 0x50, 0x8b, 0xfb, 0x69, 0x16, 0x9c, 0x25, 0x87, - 0x0e, 0x63, 0xd5, 0xe8, 0xa4, 0x88, 0xb0, 0x0f, 0x47, 0x5e, 0x3b, 0x23, 0x97, 0xa3, 0xe8, 0x11, - 0xcc, 0x38, 0x62, 0xdf, 0xe1, 0x1b, 0xec, 0xd5, 0xee, 0x37, 0xa4, 0xd8, 0x0c, 0x8e, 0x12, 0x71, - 0x9c, 0x37, 0xa3, 0xdc, 0xae, 0x8c, 0x5a, 0x6e, 0xab, 0xff, 0xac, 0x44, 0x93, 0x50, 0x50, 0x02, - 0x0f, 0x3b, 0x65, 0x4a, 0x49, 0x44, 0xaa, 0x23, 0x2b, 0xbb, 0xfa, 0x7d, 0x6f, 0xac, 0xea, 0x37, - 0x4c, 0x9e, 0xc3, 0xcb, 0xdf, 0x1f, 0x29, 0xb0, 
0xbc, 0xd1, 0x7e, 0xe2, 0x58, 0xae, 0xed, 0x9b, - 0xb3, 0x6b, 0x7b, 0xe3, 0xfa, 0x0d, 0x28, 0x39, 0xae, 0xe1, 0xf7, 0xe3, 0x35, 0xbf, 0x1f, 0xd8, - 0x35, 0x78, 0x3f, 0x16, 0x13, 0x52, 0x5e, 0x27, 0xb8, 0x00, 0xda, 0x81, 0x49, 0x47, 0x33, 0x7b, - 0xc4, 0x4f, 0xab, 0x77, 0x87, 0x58, 0xbf, 0xb9, 0x8e, 0x39, 0x7b, 0xa4, 0x78, 0x13, 0xd2, 0x58, - 0xa2, 0xa8, 0x7f, 0xa4, 0xc0, 0xdc, 0xd3, 0xbd, 0xbd, 0xd6, 0xa6, 0x29, 0x56, 0xb4, 0x38, 0x7d, - 0xbf, 0x03, 0x25, 0x5b, 0x63, 0x47, 0xc9, 0x4c, 0xcf, 0x69, 0x58, 0x50, 0xd0, 0x77, 0xa1, 0xcc, - 0x23, 0x09, 0x31, 0xbb, 0x23, 0x96, 0xda, 0x12, 0xbe, 0xe9, 0x09, 0x85, 0x15, 0xa2, 0x6c, 0xc0, - 0x3e, 0x9c, 0x7a, 0x0c, 0x4b, 0x11, 0x73, 0xf8, 0x78, 0x88, 0x63, 0x60, 0xd4, 0x86, 0x09, 0xae, - 0xd9, 0x3f, 0xe5, 0x1d, 0x76, 0x98, 0x99, 0xe8, 0x52, 0x58, 0xe9, 0xf0, 0x2f, 0x8a, 0x3d, 0x2c, - 0x75, 0x1b, 0x66, 0xc4, 0x95, 0x83, 0xe5, 0x30, 0x31, 0x2c, 0xe8, 0x36, 0x14, 0xfb, 0xba, 0x29, - 0xf3, 0xec, 0xb4, 0x94, 0x29, 0xf2, 0x1c, 0xc1, 0xdb, 0x05, 0x59, 0x3b, 0x95, 0x91, 0x27, 0x24, - 0x6b, 0xa7, 0x98, 0xb7, 0xab, 0x4f, 0xa0, 0x2c, 0x87, 0x3b, 0x0a, 0x54, 0xbc, 0x18, 0xa8, 0x98, - 0x01, 0xb4, 0x0b, 0xe5, 0xcd, 0x56, 0xd3, 0xb0, 0xbc, 0xaa, 0xab, 0xa3, 0x77, 0x9d, 0xe4, 0x5c, - 0xac, 0x6d, 0xae, 0x63, 0x2c, 0x28, 0x48, 0x85, 0x49, 0x72, 0xda, 0x21, 0x36, 0x13, 0x1e, 0x31, - 0xd5, 0x04, 0x3e, 0xcb, 0x8f, 0x45, 0x0b, 0x96, 0x14, 0xf5, 0x8f, 0x0b, 0x50, 0x96, 0xc3, 0x71, - 0x05, 0xbb, 0xb0, 0xad, 0xd8, 0x2e, 0xec, 0x8d, 0xd1, 0x5c, 0x23, 0x77, 0x0b, 0xb6, 0x97, 0xd8, - 0x82, 0xdd, 0x1f, 0x11, 0xef, 0xe2, 0xfd, 0xd7, 0x3f, 0x28, 0x30, 0x1b, 0x77, 0x4a, 0xf4, 0x2e, - 0x4c, 0xf3, 0x84, 0xa3, 0x77, 0xc8, 0x4e, 0x58, 0xe7, 0x06, 0x87, 0x30, 0xed, 0x90, 0x84, 0xa3, - 0x7c, 0xa8, 0x17, 0x88, 0x71, 0x3f, 0x92, 0x9d, 0xce, 0x1f, 0x52, 0x97, 0xe9, 0x46, 0xdd, 0xbb, - 0x46, 0xab, 0x6f, 0x9a, 0x6c, 0xd7, 0x69, 0x33, 0x47, 0x37, 0x7b, 0x29, 0x45, 0xc2, 0x29, 0xa3, - 0xc8, 0xea, 0x3f, 0x29, 0x30, 0x2d, 0x4d, 0xbe, 0x82, 0x5d, 0xc5, 0xaf, 0xc5, 0x77, 0x15, 0x77, - 0x47, 0x5c, 0xe0, 0xd9, 0x5b, 0x8a, 0xbf, 0x09, 0x4d, 0xe7, 0x4b, 0x9a, 0x7b, 0xf5, 0x91, 0x45, - 0x59, 0xd2, 0xab, 0xf9, 0x62, 0xc4, 0x82, 0x82, 0x5c, 0x98, 0xd7, 0x13, 0x31, 0x40, 0x0e, 0x6d, - 0x63, 0x34, 0x4b, 0x02, 0xb1, 0x66, 0x55, 0xc2, 0xcf, 0x27, 0x29, 0x38, 0xa5, 0x42, 0x25, 0x90, - 0xe2, 0x42, 0x1f, 0x42, 0xe9, 0x88, 0x31, 0x3b, 0xe3, 0xbc, 0x7a, 0x48, 0xe4, 0x09, 0x4d, 0xa8, - 0x88, 0xde, 0xed, 0xed, 0xb5, 0xb0, 0x80, 0x52, 0xff, 0x27, 0x1c, 0x8f, 0xb6, 0xe7, 0xe3, 0x41, - 0x3c, 0x55, 0x2e, 0x13, 0x4f, 0xa7, 0xb3, 0x62, 0x29, 0x7a, 0x0a, 0x45, 0x66, 0x8c, 0xba, 0x2d, - 0x94, 0x88, 0x7b, 0x5b, 0xed, 0x30, 0x20, 0xed, 0x6d, 0xb5, 0x31, 0x87, 0x40, 0xbb, 0x30, 0xc1, - 0xb3, 0x0f, 0x5f, 0x82, 0xc5, 0xd1, 0x97, 0x34, 0xef, 0x7f, 0xe8, 0x10, 0xfc, 0x8b, 0x62, 0x0f, - 0x47, 0xfd, 0x3e, 0xcc, 0xc4, 0xd6, 0x29, 0xfa, 0x04, 0xae, 0x1b, 0x96, 0xd6, 0x6d, 0x6a, 0x86, - 0x66, 0x76, 0x88, 0x7f, 0x39, 0x70, 0x37, 0x6b, 0x87, 0xb1, 0x15, 0xe1, 0x93, 0xab, 0x3c, 0xb8, - 0x4e, 0x8d, 0xd2, 0x70, 0x0c, 0x51, 0xd5, 0x00, 0xc2, 0x3e, 0xa2, 0x1a, 0x4c, 0x70, 0x3f, 0xf3, - 0xf2, 0xc9, 0x54, 0x73, 0x8a, 0x5b, 0xc8, 0xdd, 0x8f, 0x62, 0xaf, 0x1d, 0x3d, 0x04, 0xa0, 0xa4, - 0xe3, 0x10, 0x26, 0x82, 0x41, 0x21, 0x7e, 0x05, 0xdd, 0x0e, 0x28, 0x38, 0xc2, 0xa5, 0xfe, 0x8b, - 0x02, 0x33, 0x3b, 0x84, 0xfd, 0xc0, 0x72, 0x8e, 0x5b, 0x96, 0xa1, 0x77, 0x06, 0x57, 0x10, 0x6c, - 0x71, 0x2c, 0xd8, 0xbe, 0x35, 0x64, 0x66, 0x62, 0xd6, 0xe5, 0x85, 0x5c, 0xf5, 0x33, 0x05, 0x6e, - 0xc6, 0x38, 0x1f, 0x87, 0x4b, 0x77, 0x1f, 0x26, 0x6c, 0xcb, 0x61, 0x7e, 
0x22, 0x1e, 0x4b, 0x21, - 0x0f, 0x63, 0x91, 0x54, 0xcc, 0x61, 0xb0, 0x87, 0x86, 0xb6, 0xa0, 0xc0, 0x2c, 0xe9, 0xaa, 0xe3, - 0x61, 0x12, 0xe2, 0x34, 0x41, 0x62, 0x16, 0xf6, 0x2c, 0x5c, 0x60, 0x16, 0x9f, 0x88, 0x6a, 0x8c, - 0x2b, 0x1a, 0x7c, 0x5e, 0x52, 0x0f, 0x30, 0x94, 0x0e, 0x1d, 0xab, 0x7f, 0xe9, 0x3e, 0x04, 0x13, - 0xb1, 0xe1, 0x58, 0x7d, 0x2c, 0xb0, 0xd4, 0x9f, 0x28, 0xb0, 0x10, 0xe3, 0xbc, 0x82, 0xc0, 0xff, - 0x61, 0x3c, 0xf0, 0xdf, 0x1f, 0xa7, 0x23, 0x39, 0xe1, 0xff, 0x27, 0x85, 0x44, 0x37, 0x78, 0x87, - 0xd1, 0x21, 0x4c, 0xdb, 0x56, 0xb7, 0xfd, 0x02, 0xae, 0x03, 0xe7, 0x78, 0xde, 0x6c, 0x85, 0x58, - 0x38, 0x0a, 0x8c, 0x4e, 0x61, 0xc1, 0xd4, 0xfa, 0x84, 0xda, 0x5a, 0x87, 0xb4, 0x5f, 0xc0, 0x01, - 0xc9, 0x0d, 0x71, 0xdf, 0x90, 0x44, 0xc4, 0x69, 0x25, 0x68, 0x1b, 0xca, 0xba, 0x2d, 0xea, 0x38, - 0x59, 0xbb, 0x0c, 0xcd, 0xa2, 0x5e, 0xd5, 0xe7, 0xc5, 0x73, 0xf9, 0x81, 0x7d, 0x0c, 0xf5, 0x6f, - 0x93, 0xde, 0xc0, 0xfd, 0x0f, 0x3d, 0x81, 0x8a, 0x78, 0x84, 0xd3, 0xb1, 0x0c, 0xff, 0x66, 0x80, - 0xcf, 0x6c, 0x4b, 0xb6, 0x3d, 0x3f, 0xab, 0xbd, 0x92, 0x71, 0xe8, 0xeb, 0x93, 0x71, 0x20, 0x8c, - 0x76, 0xa0, 0x64, 0x7f, 0x95, 0x0a, 0x46, 0x24, 0x39, 0x51, 0xb6, 0x08, 0x1c, 0xf5, 0xf7, 0x8a, - 0x09, 0x73, 0x45, 0xaa, 0xfb, 0xf4, 0x85, 0xcd, 0x7a, 0x50, 0x31, 0xe5, 0xce, 0xfc, 0x01, 0x94, - 0x65, 0x86, 0x97, 0xce, 0xfc, 0x8d, 0x71, 0x9c, 0x39, 0x9a, 0xc5, 0x82, 0x0d, 0x8b, 0xdf, 0xe8, - 0x03, 0xa3, 0xef, 0xc1, 0x24, 0xf1, 0x54, 0x78, 0xb9, 0xf1, 0xbd, 0x71, 0x54, 0x84, 0x71, 0x35, - 0x2c, 0x54, 0x65, 0x9b, 0x44, 0x45, 0xdf, 0xe2, 0xe3, 0xc5, 0x79, 0xf9, 0x26, 0x90, 0x56, 0x4b, - 0x22, 0x5d, 0xdd, 0xf6, 0xba, 0x1d, 0x34, 0x3f, 0x3f, 0xab, 0x41, 0xf8, 0x89, 0xa3, 0x12, 0xea, - 0xbf, 0x29, 0xb0, 0x20, 0x46, 0xa8, 0xe3, 0x3a, 0x3a, 0x1b, 0x5c, 0x59, 0x62, 0x7a, 0x16, 0x4b, - 0x4c, 0xef, 0x0c, 0x19, 0x96, 0x94, 0x85, 0xb9, 0xc9, 0xe9, 0xa7, 0x0a, 0xdc, 0x48, 0x71, 0x5f, - 0x41, 0x5c, 0xdc, 0x8f, 0xc7, 0xc5, 0xb7, 0xc6, 0xed, 0x50, 0x4e, 0x6c, 0xfc, 0xab, 0xb9, 0x8c, - 0xee, 0x88, 0x95, 0xf2, 0x10, 0xc0, 0x76, 0xf4, 0x13, 0xdd, 0x20, 0x3d, 0x79, 0x09, 0x5e, 0x89, - 0x3c, 0x82, 0x0b, 0x28, 0x38, 0xc2, 0x85, 0x28, 0x2c, 0x77, 0xc9, 0xa1, 0xe6, 0x1a, 0x6c, 0xb5, - 0xdb, 0x5d, 0xd3, 0x6c, 0xed, 0x40, 0x37, 0x74, 0xa6, 0xcb, 0xe3, 0x82, 0xa9, 0xe6, 0x23, 0xef, - 0x72, 0x3a, 0x8b, 0xe3, 0xf9, 0x59, 0xed, 0x76, 0xd6, 0xed, 0x90, 0xcf, 0x32, 0xc0, 0x39, 0xd0, - 0x68, 0x00, 0x55, 0x87, 0x7c, 0xdf, 0xd5, 0x1d, 0xd2, 0x5d, 0x77, 0x2c, 0x3b, 0xa6, 0xb6, 0x28, - 0xd4, 0xfe, 0xea, 0xf9, 0x59, 0xad, 0x8a, 0x73, 0x78, 0x86, 0x2b, 0xce, 0x85, 0x47, 0x9f, 0xc2, - 0xa2, 0xe6, 0xbd, 0x1d, 0x8c, 0x69, 0xf5, 0x56, 0xc9, 0xfb, 0xe7, 0x67, 0xb5, 0xc5, 0xd5, 0x34, - 0x79, 0xb8, 0xc2, 0x2c, 0x50, 0xd4, 0x80, 0xf2, 0x89, 0x78, 0xd9, 0x48, 0xab, 0x13, 0x02, 0x9f, - 0x27, 0x82, 0xb2, 0xf7, 0xd8, 0x91, 0x63, 0x4e, 0x6e, 0xb4, 0xc5, 0xea, 0xf3, 0xb9, 0xf8, 0x86, - 0x92, 0xd7, 0x92, 0x72, 0xc5, 0x8b, 0x13, 0xe3, 0x4a, 0x18, 0xb5, 0x9e, 0x86, 0x24, 0x1c, 0xe5, - 0x43, 0x1f, 0xc3, 0xd4, 0x91, 0x3c, 0x95, 0xa0, 0xd5, 0xf2, 0x48, 0x49, 0x38, 0x76, 0x8a, 0xd1, - 0x5c, 0x90, 0x2a, 0xa6, 0xfc, 0x66, 0x8a, 0x43, 0x44, 0xf4, 0x3a, 0x94, 0xc5, 0xc7, 0xe6, 0xba, - 0x38, 0x8e, 0xab, 0x84, 0xb1, 0xed, 0xa9, 0xd7, 0x8c, 0x7d, 0xba, 0xcf, 0xba, 0xd9, 0x5a, 0x13, - 0xc7, 0xc2, 0x09, 0xd6, 0xcd, 0xd6, 0x1a, 0xf6, 0xe9, 0xe8, 0x13, 0x28, 0x53, 0xb2, 0xa5, 0x9b, - 0xee, 0x69, 0x15, 0x46, 0xba, 0x54, 0x6e, 0x3f, 0x16, 0xdc, 0x89, 0x83, 0xb1, 0x50, 0x83, 0xa4, - 0x63, 0x1f, 0x16, 0x1d, 0xc1, 0x94, 0xe3, 0x9a, 0xab, 0x74, 0x9f, 0x12, 0xa7, 0x3a, 0x2d, 0x74, - 
0x0c, 0x0b, 0xe7, 0xd8, 0xe7, 0x4f, 0x6a, 0x09, 0x46, 0x28, 0xe0, 0xc0, 0x21, 0x38, 0xfa, 0x43, - 0x05, 0x10, 0x75, 0x6d, 0xdb, 0x20, 0x7d, 0x62, 0x32, 0xcd, 0x10, 0x67, 0x71, 0xb4, 0x7a, 0x5d, - 0xe8, 0xfc, 0xf6, 0xb0, 0x7e, 0xa5, 0x04, 0x93, 0xca, 0x83, 0x43, 0xef, 0x34, 0x2b, 0xce, 0xd0, - 0xcb, 0x87, 0xf6, 0x90, 0x8a, 0xdf, 0xd5, 0x99, 0x91, 0x86, 0x36, 0xfb, 0xcc, 0x31, 0x1c, 0x5a, - 0x49, 0xc7, 0x3e, 0x2c, 0x7a, 0x06, 0xcb, 0xfe, 0xc3, 0x58, 0x6c, 0x59, 0x6c, 0x43, 0x37, 0x08, - 0x1d, 0x50, 0x46, 0xfa, 0xd5, 0x59, 0x31, 0xed, 0xc1, 0xdb, 0x0f, 0x9c, 0xc9, 0x85, 0x73, 0xa4, - 0x51, 0x1f, 0x6a, 0x7e, 0xc8, 0xe0, 0xeb, 0x29, 0x88, 0x59, 0x8f, 0x69, 0x47, 0x33, 0xbc, 0x7b, - 0x80, 0x39, 0xa1, 0xe0, 0xb5, 0xf3, 0xb3, 0x5a, 0x6d, 0xfd, 0x62, 0x56, 0x3c, 0x0c, 0x0b, 0x7d, - 0x17, 0xaa, 0x5a, 0x9e, 0x9e, 0x79, 0xa1, 0xe7, 0x55, 0x1e, 0x87, 0x72, 0x15, 0xe4, 0x4a, 0x23, - 0x06, 0xf3, 0x5a, 0xfc, 0x89, 0x32, 0xad, 0x2e, 0x8c, 0x74, 0x10, 0x99, 0x78, 0xd9, 0x1c, 0x1e, - 0x46, 0x24, 0x08, 0x14, 0xa7, 0x34, 0xa0, 0xdf, 0x06, 0xa4, 0x25, 0x5f, 0x55, 0xd3, 0x2a, 0x1a, - 0x29, 0xfd, 0xa4, 0x9e, 0x63, 0x87, 0x6e, 0x97, 0x22, 0x51, 0x9c, 0xa1, 0x07, 0x6d, 0xc1, 0x92, - 0x6c, 0xdd, 0x37, 0xa9, 0x76, 0x48, 0xda, 0x03, 0xda, 0x61, 0x06, 0xad, 0x2e, 0x8a, 0xd8, 0x27, - 0x2e, 0xbe, 0x56, 0x33, 0xe8, 0x38, 0x53, 0x0a, 0x7d, 0x1b, 0xe6, 0x0f, 0x2d, 0xe7, 0x40, 0xef, - 0x76, 0x89, 0xe9, 0x23, 0x2d, 0x09, 0xa4, 0x25, 0x3e, 0x1a, 0x1b, 0x09, 0x1a, 0x4e, 0x71, 0x23, - 0x0a, 0x37, 0x24, 0x72, 0xcb, 0xb1, 0x3a, 0xdb, 0x96, 0x6b, 0x32, 0xaf, 0x24, 0xba, 0x11, 0xa4, - 0x98, 0x1b, 0xab, 0x59, 0x0c, 0xcf, 0xcf, 0x6a, 0x77, 0xb2, 0x2b, 0xe0, 0x90, 0x09, 0x67, 0x63, - 0x8b, 0x17, 0x2c, 0xf2, 0x3e, 0xe3, 0x6a, 0x5e, 0x01, 0x8f, 0xf7, 0x82, 0x25, 0x34, 0xed, 0x85, - 0xbd, 0x60, 0x89, 0x40, 0x5e, 0x7c, 0x82, 0xfa, 0x5f, 0x05, 0x58, 0x0c, 0x99, 0x47, 0x7e, 0xc1, - 0x92, 0x21, 0xf2, 0x8b, 0x97, 0xc0, 0xc3, 0x5f, 0x02, 0x7f, 0xa6, 0xc0, 0x6c, 0x38, 0x74, 0xff, - 0xf7, 0x5e, 0x95, 0x84, 0xb6, 0xe5, 0xd4, 0xb9, 0x7f, 0x5f, 0x88, 0x76, 0xe0, 0xff, 0xfd, 0xd3, - 0x86, 0xaf, 0xfe, 0x7c, 0x57, 0xfd, 0x69, 0x11, 0xe6, 0x93, 0xab, 0x31, 0x76, 0x03, 0xae, 0x0c, - 0xbd, 0x01, 0x6f, 0xc1, 0xd2, 0xa1, 0x6b, 0x18, 0x03, 0x31, 0x0c, 0x91, 0x6b, 0x70, 0xef, 0x06, - 0xeb, 0x55, 0x29, 0xb9, 0xb4, 0x91, 0xc1, 0x83, 0x33, 0x25, 0x73, 0x6e, 0xf3, 0x8b, 0x97, 0xba, - 0xcd, 0x4f, 0x5d, 0x2e, 0x97, 0xc6, 0xb8, 0x5c, 0xce, 0xbc, 0x99, 0x9f, 0xb8, 0xc4, 0xcd, 0xfc, - 0x65, 0xae, 0xd2, 0x33, 0x82, 0xd8, 0xd0, 0x97, 0x9d, 0xaf, 0xc2, 0x2d, 0x29, 0xc6, 0xc4, 0x2d, - 0xb7, 0xc9, 0x1c, 0xcb, 0x30, 0x88, 0xb3, 0xee, 0xf6, 0xfb, 0x03, 0xf5, 0x9b, 0x30, 0x1b, 0x7f, - 0xbf, 0xe1, 0xcd, 0xb4, 0xf7, 0x84, 0x44, 0xde, 0x23, 0x46, 0x66, 0xda, 0x6b, 0xc7, 0x01, 0x87, - 0xfa, 0xfb, 0x0a, 0x2c, 0x67, 0xbf, 0xd3, 0x44, 0x06, 0xcc, 0xf6, 0xb5, 0xd3, 0xe8, 0xdb, 0x59, - 0xe5, 0x92, 0x27, 0x3c, 0xe2, 0xe2, 0x7e, 0x3b, 0x86, 0x85, 0x13, 0xd8, 0xea, 0x97, 0x0a, 0xdc, - 0xcc, 0xb9, 0x32, 0xbf, 0x5a, 0x4b, 0xd0, 0x47, 0x50, 0xe9, 0x6b, 0xa7, 0x6d, 0xd7, 0xe9, 0x91, - 0x4b, 0x9f, 0x69, 0x89, 0x88, 0xb1, 0x2d, 0x51, 0x70, 0x80, 0xa7, 0xfe, 0x48, 0x81, 0x6a, 0xde, - 0xee, 0x02, 0xbd, 0x1b, 0xbb, 0xdc, 0xff, 0x7a, 0xe2, 0x72, 0x7f, 0x21, 0x25, 0xf7, 0x92, 0xae, - 0xf6, 0xff, 0x4e, 0x81, 0xe5, 0xec, 0x5d, 0x16, 0x7a, 0x3b, 0x66, 0x61, 0x2d, 0x61, 0xe1, 0x5c, - 0x42, 0x4a, 0xda, 0xf7, 0x3d, 0x98, 0x95, 0x7b, 0x31, 0x09, 0x23, 0x47, 0x55, 0xcd, 0x8a, 0x95, - 0x12, 0xc2, 0xdf, 0x7b, 0x88, 0xf9, 0x8a, 0xb7, 0xe1, 0x04, 0x9a, 0xfa, 0x07, 0x05, 0x98, 0x68, - 0x77, 0x34, 0x83, 0x5c, 
0x41, 0x99, 0xf5, 0x9d, 0x58, 0x99, 0x35, 0xec, 0x7f, 0x2e, 0xc2, 0xaa, - 0xdc, 0x0a, 0x0b, 0x27, 0x2a, 0xac, 0x37, 0x46, 0x42, 0xbb, 0xb8, 0xb8, 0xfa, 0x15, 0x98, 0x0a, - 0x94, 0x8e, 0x17, 0xf3, 0xd5, 0xbf, 0x2e, 0xc0, 0x74, 0x44, 0xc5, 0x98, 0x19, 0xe3, 0x30, 0x96, - 0x69, 0x47, 0xf9, 0x77, 0x61, 0x44, 0x57, 0xdd, 0xcf, 0xad, 0xde, 0x3b, 0xcd, 0xf0, 0x65, 0x5e, - 0x3a, 0xe5, 0x7e, 0x13, 0x66, 0x99, 0xf8, 0xf7, 0x5d, 0x70, 0x12, 0x5c, 0x14, 0xbe, 0x18, 0xbc, - 0xee, 0xdd, 0x8b, 0x51, 0x71, 0x82, 0xfb, 0xd6, 0x23, 0x98, 0x89, 0x29, 0x1b, 0xeb, 0x99, 0xe5, - 0x3f, 0x2a, 0xf0, 0xf5, 0xa1, 0xfb, 0x74, 0xd4, 0x8c, 0x2d, 0x92, 0x7a, 0x62, 0x91, 0xac, 0xe4, - 0x03, 0xbc, 0xbc, 0xe7, 0x3a, 0xcd, 0x37, 0x3f, 0xff, 0x62, 0xe5, 0xda, 0xcf, 0xbe, 0x58, 0xb9, - 0xf6, 0xf3, 0x2f, 0x56, 0xae, 0xfd, 0xee, 0xf9, 0x8a, 0xf2, 0xf9, 0xf9, 0x8a, 0xf2, 0xb3, 0xf3, - 0x15, 0xe5, 0xe7, 0xe7, 0x2b, 0xca, 0x7f, 0x9c, 0xaf, 0x28, 0x7f, 0xf2, 0xe5, 0xca, 0xb5, 0x8f, - 0xca, 0x12, 0xee, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xa7, 0xd7, 0xb5, 0x56, 0x5c, 0x3d, 0x00, - 0x00, + // 3587 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x1c, 0x47, + 0x76, 0x57, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xab, 0x48, 0x91, 0x63, 0xc9, 0xe2, 0xc8, 0x6d, + 0x40, 0x91, 0x1d, 0x69, 0xc6, 0x92, 0x2d, 0x59, 0xb1, 0x10, 0xdb, 0x1c, 0x52, 0x94, 0xe8, 0xf0, + 0x63, 0x5c, 0x43, 0x2a, 0x86, 0x11, 0x3b, 0x6e, 0xce, 0x14, 0x87, 0x2d, 0xf6, 0x74, 0xb7, 0xbb, + 0x6b, 0x68, 0x0e, 0x90, 0x43, 0x0e, 0x49, 0x80, 0x00, 0x09, 0x92, 0x8b, 0x93, 0x1c, 0x63, 0x04, + 0xc8, 0x69, 0x17, 0xbb, 0xb7, 0xdd, 0x83, 0x61, 0x60, 0x01, 0x2f, 0x20, 0x2c, 0xbc, 0x80, 0x6f, + 0xeb, 0x13, 0xb1, 0xa6, 0x4f, 0x8b, 0xfd, 0x07, 0x16, 0x3a, 0x2c, 0x16, 0x55, 0x5d, 0xfd, 0xdd, + 0xad, 0x69, 0xd2, 0x12, 0xb1, 0x58, 0xec, 0x8d, 0x53, 0xef, 0xbd, 0xdf, 0x7b, 0x55, 0xf5, 0xea, + 0xbd, 0xd7, 0x55, 0x8f, 0xb0, 0xbc, 0x77, 0xdb, 0xae, 0xaa, 0x46, 0x6d, 0xaf, 0xb7, 0x4d, 0x2c, + 0x9d, 0x50, 0x62, 0xd7, 0xf6, 0x89, 0xde, 0x36, 0xac, 0x9a, 0x20, 0x28, 0xa6, 0x5a, 0x23, 0x07, + 0x94, 0xe8, 0xb6, 0x6a, 0xe8, 0x76, 0x6d, 0xff, 0xfa, 0x36, 0xa1, 0xca, 0xf5, 0x5a, 0x87, 0xe8, + 0xc4, 0x52, 0x28, 0x69, 0x57, 0x4d, 0xcb, 0xa0, 0x06, 0xba, 0xe8, 0xb0, 0x57, 0x15, 0x53, 0xad, + 0xfa, 0xec, 0x55, 0xc1, 0x7e, 0xfe, 0x5a, 0x47, 0xa5, 0xbb, 0xbd, 0xed, 0x6a, 0xcb, 0xe8, 0xd6, + 0x3a, 0x46, 0xc7, 0xa8, 0x71, 0xa9, 0xed, 0xde, 0x0e, 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x1c, 0xb4, + 0xf3, 0x72, 0x40, 0x79, 0xcb, 0xb0, 0x48, 0x6d, 0x3f, 0xa6, 0xf1, 0xfc, 0x6b, 0x3e, 0x4f, 0x57, + 0x69, 0xed, 0xaa, 0x3a, 0xb1, 0xfa, 0x35, 0x73, 0xaf, 0xc3, 0x06, 0xec, 0x5a, 0x97, 0x50, 0x25, + 0x49, 0xaa, 0x96, 0x26, 0x65, 0xf5, 0x74, 0xaa, 0x76, 0x49, 0x4c, 0xe0, 0xd6, 0x20, 0x01, 0xbb, + 0xb5, 0x4b, 0xba, 0x4a, 0x4c, 0xee, 0xd5, 0x34, 0xb9, 0x1e, 0x55, 0xb5, 0x9a, 0xaa, 0x53, 0x9b, + 0x5a, 0x51, 0x21, 0xf9, 0x0e, 0x4c, 0x2d, 0x68, 0x9a, 0xf1, 0x09, 0x69, 0x2f, 0x6b, 0xe4, 0xe0, + 0x81, 0xa1, 0xf5, 0xba, 0x04, 0x5d, 0x86, 0xe1, 0xb6, 0xa5, 0xee, 0x13, 0xab, 0x2c, 0x5d, 0x92, + 0xae, 0x8c, 0xd4, 0xc7, 0x1f, 0x1d, 0x56, 0xce, 0x1c, 0x1d, 0x56, 0x86, 0x97, 0xf8, 0x28, 0x16, + 0x54, 0xd9, 0x86, 0x09, 0x21, 0x7c, 0xdf, 0xb0, 0x69, 0x43, 0xa1, 0xbb, 0xe8, 0x06, 0x80, 0xa9, + 0xd0, 0xdd, 0x86, 0x45, 0x76, 0xd4, 0x03, 0x21, 0x8e, 0x84, 0x38, 0x34, 0x3c, 0x0a, 0x0e, 0x70, + 0xa1, 0xab, 0x50, 0xb2, 0x88, 0xd2, 0xde, 0xd0, 0xb5, 0x7e, 0x39, 0x77, 0x49, 0xba, 0x52, 0xaa, + 0x4f, 0x0a, 0x89, 0x12, 0x16, 0xe3, 0xd8, 0xe3, 0x90, 0x3f, 0xcd, 0xc1, 0xc8, 0x92, 0x42, 
0xba, + 0x86, 0xde, 0x24, 0x14, 0x7d, 0x04, 0x25, 0xb6, 0xf0, 0x6d, 0x85, 0x2a, 0x5c, 0xdb, 0xe8, 0x8d, + 0x57, 0xaa, 0xbe, 0x63, 0x78, 0xeb, 0x50, 0x35, 0xf7, 0x3a, 0x6c, 0xc0, 0xae, 0x32, 0xee, 0xea, + 0xfe, 0xf5, 0xea, 0xc6, 0xf6, 0x43, 0xd2, 0xa2, 0x6b, 0x84, 0x2a, 0xbe, 0x7d, 0xfe, 0x18, 0xf6, + 0x50, 0xd1, 0x3a, 0x14, 0x6c, 0x93, 0xb4, 0xb8, 0x65, 0xa3, 0x37, 0xae, 0x56, 0x9f, 0xe8, 0x76, + 0x55, 0xcf, 0xb2, 0xa6, 0x49, 0x5a, 0xf5, 0xb3, 0x02, 0xb9, 0xc0, 0x7e, 0x61, 0x8e, 0x83, 0x1e, + 0xc0, 0xb0, 0x4d, 0x15, 0xda, 0xb3, 0xcb, 0x79, 0x8e, 0x58, 0xcd, 0x8c, 0xc8, 0xa5, 0xfc, 0xcd, + 0x70, 0x7e, 0x63, 0x81, 0x26, 0xff, 0x26, 0x07, 0xc8, 0xe3, 0x5d, 0x34, 0xf4, 0xb6, 0x4a, 0x55, + 0x43, 0x47, 0x6f, 0x40, 0x81, 0xf6, 0x4d, 0x22, 0xb6, 0xe2, 0xb2, 0x6b, 0xd0, 0x66, 0xdf, 0x24, + 0x8f, 0x0f, 0x2b, 0xb3, 0x71, 0x09, 0x46, 0xc1, 0x5c, 0x06, 0xad, 0x7a, 0xa6, 0xe6, 0xb8, 0xf4, + 0x6b, 0x61, 0xd5, 0x8f, 0x0f, 0x2b, 0x09, 0xc7, 0xa6, 0xea, 0x21, 0x85, 0x0d, 0x44, 0xfb, 0x80, + 0x34, 0xc5, 0xa6, 0x9b, 0x96, 0xa2, 0xdb, 0x8e, 0x26, 0xb5, 0x4b, 0xc4, 0x22, 0xbc, 0x9c, 0x6d, + 0xd3, 0x98, 0x44, 0xfd, 0xbc, 0xb0, 0x02, 0xad, 0xc6, 0xd0, 0x70, 0x82, 0x06, 0xe6, 0xcd, 0x16, + 0x51, 0x6c, 0x43, 0x2f, 0x17, 0xc2, 0xde, 0x8c, 0xf9, 0x28, 0x16, 0x54, 0xf4, 0x12, 0x14, 0xbb, + 0xc4, 0xb6, 0x95, 0x0e, 0x29, 0x0f, 0x71, 0xc6, 0x09, 0xc1, 0x58, 0x5c, 0x73, 0x86, 0xb1, 0x4b, + 0x97, 0x3f, 0x97, 0x60, 0xcc, 0x5b, 0xb9, 0x55, 0xd5, 0xa6, 0xe8, 0xef, 0x62, 0x7e, 0x58, 0xcd, + 0x36, 0x25, 0x26, 0xcd, 0xbd, 0xd0, 0xf3, 0x79, 0x77, 0x24, 0xe0, 0x83, 0x6b, 0x30, 0xa4, 0x52, + 0xd2, 0x65, 0xfb, 0x90, 0xbf, 0x32, 0x7a, 0xe3, 0x4a, 0x56, 0x97, 0xa9, 0x8f, 0x09, 0xd0, 0xa1, + 0x15, 0x26, 0x8e, 0x1d, 0x14, 0xf9, 0xbf, 0x0a, 0x01, 0xf3, 0x99, 0x6b, 0xa2, 0x0f, 0xa0, 0x64, + 0x13, 0x8d, 0xb4, 0xa8, 0x61, 0x09, 0xf3, 0x5f, 0xcd, 0x68, 0xbe, 0xb2, 0x4d, 0xb4, 0xa6, 0x10, + 0xad, 0x9f, 0x65, 0xf6, 0xbb, 0xbf, 0xb0, 0x07, 0x89, 0xde, 0x85, 0x12, 0x25, 0x5d, 0x53, 0x53, + 0x28, 0x11, 0xe7, 0xe8, 0xc5, 0xe0, 0x14, 0x98, 0xe7, 0x30, 0xb0, 0x86, 0xd1, 0xde, 0x14, 0x6c, + 0xfc, 0xf8, 0x78, 0x4b, 0xe2, 0x8e, 0x62, 0x0f, 0x06, 0xed, 0xc3, 0x78, 0xcf, 0x6c, 0x33, 0x4e, + 0xca, 0xe2, 0x59, 0xa7, 0x2f, 0x3c, 0xe9, 0x56, 0xd6, 0xb5, 0xd9, 0x0a, 0x49, 0xd7, 0x67, 0x85, + 0xae, 0xf1, 0xf0, 0x38, 0x8e, 0x68, 0x41, 0x0b, 0x30, 0xd1, 0x55, 0x75, 0x16, 0x97, 0xfa, 0x4d, + 0xd2, 0x32, 0xf4, 0xb6, 0xcd, 0xdd, 0x6a, 0xa8, 0x3e, 0x27, 0x00, 0x26, 0xd6, 0xc2, 0x64, 0x1c, + 0xe5, 0x47, 0xef, 0x00, 0x72, 0xa7, 0x71, 0xcf, 0x09, 0xc7, 0xaa, 0xa1, 0x73, 0x9f, 0xcb, 0xfb, + 0xce, 0xbd, 0x19, 0xe3, 0xc0, 0x09, 0x52, 0x68, 0x15, 0x66, 0x2c, 0xb2, 0xaf, 0xb2, 0x39, 0xde, + 0x57, 0x6d, 0x6a, 0x58, 0xfd, 0x55, 0xb5, 0xab, 0xd2, 0xf2, 0x30, 0xb7, 0xa9, 0x7c, 0x74, 0x58, + 0x99, 0xc1, 0x09, 0x74, 0x9c, 0x28, 0x25, 0xff, 0xf7, 0x30, 0x4c, 0x44, 0xe2, 0x0d, 0x7a, 0x00, + 0xb3, 0xad, 0x9e, 0x65, 0x11, 0x9d, 0xae, 0xf7, 0xba, 0xdb, 0xc4, 0x6a, 0xb6, 0x76, 0x49, 0xbb, + 0xa7, 0x91, 0x36, 0x77, 0x94, 0xa1, 0xfa, 0xbc, 0xb0, 0x78, 0x76, 0x31, 0x91, 0x0b, 0xa7, 0x48, + 0xb3, 0x55, 0xd0, 0xf9, 0xd0, 0x9a, 0x6a, 0xdb, 0x1e, 0x66, 0x8e, 0x63, 0x7a, 0xab, 0xb0, 0x1e, + 0xe3, 0xc0, 0x09, 0x52, 0xcc, 0xc6, 0x36, 0xb1, 0x55, 0x8b, 0xb4, 0xa3, 0x36, 0xe6, 0xc3, 0x36, + 0x2e, 0x25, 0x72, 0xe1, 0x14, 0x69, 0x74, 0x13, 0x46, 0x1d, 0x6d, 0x7c, 0xff, 0xc4, 0x46, 0x4f, + 0x0b, 0xb0, 0xd1, 0x75, 0x9f, 0x84, 0x83, 0x7c, 0x6c, 0x6a, 0xc6, 0xb6, 0x4d, 0xac, 0x7d, 0xd2, + 0x4e, 0xdf, 0xe0, 0x8d, 0x18, 0x07, 0x4e, 0x90, 0x62, 0x53, 0x73, 0x3c, 0x30, 0x36, 0xb5, 0xe1, + 0xf0, 0xd4, 0xb6, 
0x12, 0xb9, 0x70, 0x8a, 0x34, 0xf3, 0x63, 0xc7, 0xe4, 0x85, 0x7d, 0x45, 0xd5, + 0x94, 0x6d, 0x8d, 0x94, 0x8b, 0x61, 0x3f, 0x5e, 0x0f, 0x93, 0x71, 0x94, 0x1f, 0xdd, 0x83, 0x29, + 0x67, 0x68, 0x4b, 0x57, 0x3c, 0x90, 0x12, 0x07, 0x79, 0x4e, 0x80, 0x4c, 0xad, 0x47, 0x19, 0x70, + 0x5c, 0x06, 0xbd, 0x01, 0xe3, 0x2d, 0x43, 0xd3, 0xb8, 0x3f, 0x2e, 0x1a, 0x3d, 0x9d, 0x96, 0x47, + 0x38, 0x0a, 0x62, 0xe7, 0x71, 0x31, 0x44, 0xc1, 0x11, 0x4e, 0x44, 0x00, 0x5a, 0x6e, 0xc2, 0xb1, + 0xcb, 0xc0, 0xe3, 0xe3, 0xf5, 0xac, 0x31, 0xc0, 0x4b, 0x55, 0x7e, 0x0d, 0xe0, 0x0d, 0xd9, 0x38, + 0x00, 0x2c, 0xff, 0x42, 0x82, 0xb9, 0x94, 0xd0, 0x81, 0xde, 0x0a, 0xa5, 0xd8, 0xbf, 0x8c, 0xa4, + 0xd8, 0x0b, 0x29, 0x62, 0x81, 0x3c, 0xab, 0xc3, 0x98, 0xc5, 0x66, 0xa5, 0x77, 0x1c, 0x16, 0x11, + 0x23, 0x6f, 0x0e, 0x98, 0x06, 0x0e, 0xca, 0xf8, 0x31, 0x7f, 0xea, 0xe8, 0xb0, 0x32, 0x16, 0xa2, + 0xe1, 0x30, 0xbc, 0xfc, 0x3f, 0x39, 0x80, 0x25, 0x62, 0x6a, 0x46, 0xbf, 0x4b, 0xf4, 0xd3, 0xa8, + 0xa1, 0x36, 0x42, 0x35, 0xd4, 0xb5, 0x41, 0xdb, 0xe3, 0x99, 0x96, 0x5a, 0x44, 0xfd, 0x6d, 0xa4, + 0x88, 0xaa, 0x65, 0x87, 0x7c, 0x72, 0x15, 0xf5, 0xab, 0x3c, 0x4c, 0xfb, 0xcc, 0x7e, 0x19, 0x75, + 0x27, 0xb4, 0xc7, 0x7f, 0x11, 0xd9, 0xe3, 0xb9, 0x04, 0x91, 0x67, 0x56, 0x47, 0x3d, 0xfd, 0x7a, + 0x06, 0x3d, 0x84, 0x71, 0x56, 0x38, 0x39, 0xee, 0xc1, 0xcb, 0xb2, 0xe1, 0x63, 0x97, 0x65, 0x5e, + 0x02, 0x5d, 0x0d, 0x21, 0xe1, 0x08, 0x72, 0x4a, 0x19, 0x58, 0x7c, 0xd6, 0x65, 0xa0, 0xfc, 0x85, + 0x04, 0xe3, 0xfe, 0x36, 0x9d, 0x42, 0xd1, 0xb6, 0x1e, 0x2e, 0xda, 0x5e, 0xca, 0xec, 0xa2, 0x29, + 0x55, 0xdb, 0xef, 0x58, 0x81, 0xef, 0x31, 0xb1, 0x03, 0xbe, 0xad, 0xb4, 0xf6, 0xd0, 0x25, 0x28, + 0xe8, 0x4a, 0xd7, 0xf5, 0x4c, 0xef, 0xb0, 0xac, 0x2b, 0x5d, 0x82, 0x39, 0x05, 0x7d, 0x2a, 0x01, + 0x12, 0x59, 0x60, 0x41, 0xd7, 0x0d, 0xaa, 0x38, 0xb1, 0xd2, 0x31, 0x6b, 0x25, 0xb3, 0x59, 0xae, + 0xc6, 0xea, 0x56, 0x0c, 0xeb, 0xae, 0x4e, 0xad, 0xbe, 0xbf, 0x23, 0x71, 0x06, 0x9c, 0x60, 0x00, + 0x52, 0x00, 0x2c, 0x81, 0xb9, 0x69, 0x88, 0x83, 0x7c, 0x2d, 0x43, 0xcc, 0x63, 0x02, 0x8b, 0x86, + 0xbe, 0xa3, 0x76, 0xfc, 0xb0, 0x83, 0x3d, 0x20, 0x1c, 0x00, 0x3d, 0x7f, 0x17, 0xe6, 0x52, 0xac, + 0x45, 0x93, 0x90, 0xdf, 0x23, 0x7d, 0x67, 0xd9, 0x30, 0xfb, 0x13, 0xcd, 0xc0, 0xd0, 0xbe, 0xa2, + 0xf5, 0x9c, 0xf0, 0x3b, 0x82, 0x9d, 0x1f, 0x6f, 0xe4, 0x6e, 0x4b, 0xf2, 0xe7, 0x43, 0x41, 0xdf, + 0xe1, 0x15, 0xf3, 0x15, 0xf6, 0xd1, 0x6a, 0x6a, 0x6a, 0x4b, 0xb1, 0x45, 0x21, 0x74, 0xd6, 0xf9, + 0x60, 0x75, 0xc6, 0xb0, 0x47, 0x0d, 0xd5, 0xd6, 0xb9, 0x67, 0x5b, 0x5b, 0xe7, 0x9f, 0x4e, 0x6d, + 0xfd, 0xf7, 0x50, 0xb2, 0xdd, 0xaa, 0xba, 0xc0, 0x21, 0xaf, 0x1f, 0x23, 0xbe, 0x8a, 0x82, 0xda, + 0x53, 0xe0, 0x95, 0xd2, 0x1e, 0x68, 0x52, 0x11, 0x3d, 0x74, 0xcc, 0x22, 0xfa, 0xa9, 0x16, 0xbe, + 0x2c, 0xa6, 0x9a, 0x4a, 0xcf, 0x26, 0x6d, 0x1e, 0x88, 0x4a, 0x7e, 0x4c, 0x6d, 0xf0, 0x51, 0x2c, + 0xa8, 0xe8, 0x83, 0x90, 0xcb, 0x96, 0x4e, 0xe2, 0xb2, 0xe3, 0xe9, 0xee, 0x8a, 0xb6, 0x60, 0xce, + 0xb4, 0x8c, 0x8e, 0x45, 0x6c, 0x7b, 0x89, 0x28, 0x6d, 0x4d, 0xd5, 0x89, 0xbb, 0x3e, 0x4e, 0x45, + 0x74, 0xe1, 0xe8, 0xb0, 0x32, 0xd7, 0x48, 0x66, 0xc1, 0x69, 0xb2, 0xf2, 0xa3, 0x02, 0x4c, 0x46, + 0x33, 0x60, 0x4a, 0x91, 0x2a, 0x9d, 0xa8, 0x48, 0xbd, 0x1a, 0x38, 0x0c, 0x4e, 0x05, 0x1f, 0xb8, + 0xc1, 0x89, 0x1d, 0x88, 0x05, 0x98, 0x10, 0xd1, 0xc0, 0x25, 0x8a, 0x32, 0xdd, 0xdb, 0xfd, 0xad, + 0x30, 0x19, 0x47, 0xf9, 0x59, 0xe9, 0xe9, 0x57, 0x94, 0x2e, 0x48, 0x21, 0x5c, 0x7a, 0x2e, 0x44, + 0x19, 0x70, 0x5c, 0x06, 0xad, 0xc1, 0x74, 0x4f, 0x8f, 0x43, 0x39, 0xde, 0x78, 0x41, 0x40, 0x4d, + 0x6f, 0xc5, 0x59, 0x70, 0x92, 0x1c, 0xda, 
0x09, 0x55, 0xa3, 0xc3, 0x3c, 0xc2, 0xde, 0xc8, 0x7c, + 0x76, 0x32, 0x97, 0xa3, 0xe8, 0x0e, 0x8c, 0x59, 0xfc, 0xbb, 0xc3, 0x35, 0xd8, 0xa9, 0xdd, 0xcf, + 0x09, 0xb1, 0x31, 0x1c, 0x24, 0xe2, 0x30, 0x6f, 0x42, 0xb9, 0x5d, 0xca, 0x5a, 0x6e, 0xcb, 0x3f, + 0x93, 0x82, 0x49, 0xc8, 0x2b, 0x81, 0x07, 0xdd, 0x32, 0xc5, 0x24, 0x02, 0xd5, 0x91, 0x91, 0x5c, + 0xfd, 0xde, 0x3a, 0x56, 0xf5, 0xeb, 0x27, 0xcf, 0xc1, 0xe5, 0xef, 0x67, 0x12, 0xcc, 0x2e, 0x37, + 0xef, 0x59, 0x46, 0xcf, 0x74, 0xcd, 0xd9, 0x30, 0x9d, 0x75, 0x7d, 0x1d, 0x0a, 0x56, 0x4f, 0x73, + 0xe7, 0xf1, 0xa2, 0x3b, 0x0f, 0xdc, 0xd3, 0xd8, 0x3c, 0xa6, 0x23, 0x52, 0xce, 0x24, 0x98, 0x00, + 0x5a, 0x87, 0x61, 0x4b, 0xd1, 0x3b, 0xc4, 0x4d, 0xab, 0x97, 0x07, 0x58, 0xbf, 0xb2, 0x84, 0x19, + 0x7b, 0xa0, 0x78, 0xe3, 0xd2, 0x58, 0xa0, 0xc8, 0xff, 0x2e, 0xc1, 0xc4, 0xfd, 0xcd, 0xcd, 0xc6, + 0x8a, 0xce, 0x4f, 0x34, 0xbf, 0x5b, 0xbd, 0x04, 0x05, 0x53, 0xa1, 0xbb, 0xd1, 0x4c, 0xcf, 0x68, + 0x98, 0x53, 0xd0, 0x7b, 0x50, 0x64, 0x91, 0x84, 0xe8, 0xed, 0x8c, 0xa5, 0xb6, 0x80, 0xaf, 0x3b, + 0x42, 0x7e, 0x85, 0x28, 0x06, 0xb0, 0x0b, 0x27, 0xef, 0xc1, 0x4c, 0xc0, 0x1c, 0xb6, 0x1e, 0x0f, + 0x58, 0x76, 0x44, 0x4d, 0x18, 0x62, 0x9a, 0x59, 0x0e, 0xcc, 0x67, 0xb8, 0xcc, 0x8c, 0x4c, 0xc9, + 0xaf, 0x74, 0xd8, 0x2f, 0x1b, 0x3b, 0x58, 0xf2, 0x1a, 0x8c, 0xf1, 0x0b, 0x65, 0xc3, 0xa2, 0x7c, + 0x59, 0xd0, 0x45, 0xc8, 0x77, 0x55, 0x5d, 0xe4, 0xd9, 0x51, 0x21, 0x93, 0x67, 0x39, 0x82, 0x8d, + 0x73, 0xb2, 0x72, 0x20, 0x22, 0x8f, 0x4f, 0x56, 0x0e, 0x30, 0x1b, 0x97, 0xef, 0x41, 0x51, 0x2c, + 0x77, 0x10, 0x28, 0xff, 0x64, 0xa0, 0x7c, 0x02, 0xd0, 0x06, 0x14, 0x57, 0x1a, 0x75, 0xcd, 0x70, + 0xaa, 0xae, 0x96, 0xda, 0xb6, 0xa2, 0x7b, 0xb1, 0xb8, 0xb2, 0x84, 0x31, 0xa7, 0x20, 0x19, 0x86, + 0xc9, 0x41, 0x8b, 0x98, 0x94, 0x7b, 0xc4, 0x48, 0x1d, 0xd8, 0x2e, 0xdf, 0xe5, 0x23, 0x58, 0x50, + 0xe4, 0xff, 0xc8, 0x41, 0x51, 0x2c, 0xc7, 0x29, 0x7c, 0x85, 0xad, 0x86, 0xbe, 0xc2, 0x5e, 0xce, + 0xe6, 0x1a, 0xa9, 0x9f, 0x60, 0x9b, 0x91, 0x4f, 0xb0, 0xab, 0x19, 0xf1, 0x9e, 0xfc, 0xfd, 0xf5, + 0x63, 0x09, 0xc6, 0xc3, 0x4e, 0x89, 0x6e, 0xc2, 0x28, 0x4b, 0x38, 0x6a, 0x8b, 0xac, 0xfb, 0x75, + 0xae, 0x77, 0x09, 0xd3, 0xf4, 0x49, 0x38, 0xc8, 0x87, 0x3a, 0x9e, 0x18, 0xf3, 0x23, 0x31, 0xe9, + 0xf4, 0x25, 0xed, 0x51, 0x55, 0xab, 0x3a, 0x8f, 0x24, 0xd5, 0x15, 0x9d, 0x6e, 0x58, 0x4d, 0x6a, + 0xa9, 0x7a, 0x27, 0xa6, 0x88, 0x3b, 0x65, 0x10, 0x59, 0xfe, 0xa9, 0x04, 0xa3, 0xc2, 0xe4, 0x53, + 0xf8, 0xaa, 0xf8, 0x9b, 0xf0, 0x57, 0xc5, 0xe5, 0x8c, 0x07, 0x3c, 0xf9, 0x93, 0xe2, 0xff, 0x7d, + 0xd3, 0xd9, 0x91, 0x66, 0x5e, 0xbd, 0x6b, 0xd8, 0x34, 0xea, 0xd5, 0xec, 0x30, 0x62, 0x4e, 0x41, + 0x3d, 0x98, 0x54, 0x23, 0x31, 0x40, 0x2c, 0x6d, 0x2d, 0x9b, 0x25, 0x9e, 0x58, 0xbd, 0x2c, 0xe0, + 0x27, 0xa3, 0x14, 0x1c, 0x53, 0x21, 0x13, 0x88, 0x71, 0xa1, 0x77, 0xa1, 0xb0, 0x4b, 0xa9, 0x99, + 0x70, 0x5f, 0x3d, 0x20, 0xf2, 0xf8, 0x26, 0x94, 0xf8, 0xec, 0x36, 0x37, 0x1b, 0x98, 0x43, 0xc9, + 0xbf, 0xf7, 0xd7, 0xa3, 0xe9, 0xf8, 0xb8, 0x17, 0x4f, 0xa5, 0x93, 0xc4, 0xd3, 0xd1, 0xa4, 0x58, + 0x8a, 0xee, 0x43, 0x9e, 0x6a, 0x59, 0x3f, 0x0b, 0x05, 0xe2, 0xe6, 0x6a, 0xd3, 0x0f, 0x48, 0x9b, + 0xab, 0x4d, 0xcc, 0x20, 0xd0, 0x06, 0x0c, 0xb1, 0xec, 0xc3, 0x8e, 0x60, 0x3e, 0xfb, 0x91, 0x66, + 0xf3, 0xf7, 0x1d, 0x82, 0xfd, 0xb2, 0xb1, 0x83, 0x23, 0x7f, 0x0c, 0x63, 0xa1, 0x73, 0x8a, 0x3e, + 0x82, 0xb3, 0x9a, 0xa1, 0xb4, 0xeb, 0x8a, 0xa6, 0xe8, 0x2d, 0xe2, 0x3e, 0x0e, 0x5c, 0x4e, 0xfa, + 0xc2, 0x58, 0x0d, 0xf0, 0x89, 0x53, 0x3e, 0x23, 0x94, 0x9c, 0x0d, 0xd2, 0x70, 0x08, 0x51, 0x56, + 0x00, 0xfc, 0x39, 0xa2, 0x0a, 0x0c, 0x31, 0x3f, 0x73, 0xf2, 0xc9, 
0x48, 0x7d, 0x84, 0x59, 0xc8, + 0xdc, 0xcf, 0xc6, 0xce, 0x38, 0xba, 0x01, 0x60, 0x93, 0x96, 0x45, 0x28, 0x0f, 0x06, 0xb9, 0xf0, + 0x03, 0x63, 0xd3, 0xa3, 0xe0, 0x00, 0x97, 0xfc, 0x73, 0x09, 0xc6, 0xd6, 0x09, 0xfd, 0xc4, 0xb0, + 0xf6, 0x1a, 0x86, 0xa6, 0xb6, 0xfa, 0xa7, 0x10, 0x6c, 0x71, 0x28, 0xd8, 0xbe, 0x32, 0x60, 0x67, + 0x42, 0xd6, 0xa5, 0x85, 0x5c, 0xf9, 0x0b, 0x09, 0xe6, 0x42, 0x9c, 0x77, 0xfd, 0xa3, 0xbb, 0x05, + 0x43, 0xa6, 0x61, 0x51, 0x37, 0x11, 0x1f, 0x4b, 0x21, 0x0b, 0x63, 0x81, 0x54, 0xcc, 0x60, 0xb0, + 0x83, 0x86, 0x56, 0x21, 0x47, 0x0d, 0xe1, 0xaa, 0xc7, 0xc3, 0x24, 0xc4, 0xaa, 0x83, 0xc0, 0xcc, + 0x6d, 0x1a, 0x38, 0x47, 0x0d, 0xb6, 0x11, 0xe5, 0x10, 0x57, 0x30, 0xf8, 0x3c, 0xa3, 0x19, 0x60, + 0x28, 0xec, 0x58, 0x46, 0xf7, 0xc4, 0x73, 0xf0, 0x36, 0x62, 0xd9, 0x32, 0xba, 0x98, 0x63, 0xc9, + 0x5f, 0x4a, 0x30, 0x15, 0xe2, 0x3c, 0x85, 0xc0, 0xff, 0x6e, 0x38, 0xf0, 0x5f, 0x3d, 0xce, 0x44, + 0x52, 0xc2, 0xff, 0x97, 0xb9, 0xc8, 0x34, 0xd8, 0x84, 0xd1, 0x0e, 0x8c, 0x9a, 0x46, 0xbb, 0xf9, + 0x14, 0x9e, 0x03, 0x27, 0x58, 0xde, 0x6c, 0xf8, 0x58, 0x38, 0x08, 0x8c, 0x0e, 0x60, 0x4a, 0x57, + 0xba, 0xc4, 0x36, 0x95, 0x16, 0x69, 0x3e, 0x85, 0x0b, 0x92, 0x73, 0xfc, 0xbd, 0x21, 0x8a, 0x88, + 0xe3, 0x4a, 0xd0, 0x1a, 0x14, 0x55, 0x93, 0xd7, 0x71, 0xa2, 0x76, 0x19, 0x98, 0x45, 0x9d, 0xaa, + 0xcf, 0x89, 0xe7, 0xe2, 0x07, 0x76, 0x31, 0xe4, 0x1f, 0x44, 0xbd, 0x81, 0xf9, 0x1f, 0xba, 0x07, + 0x25, 0xde, 0x62, 0xd1, 0x32, 0x34, 0xf7, 0x65, 0x80, 0xed, 0x6c, 0x43, 0x8c, 0x3d, 0x3e, 0xac, + 0x5c, 0x48, 0xb8, 0xf4, 0x75, 0xc9, 0xd8, 0x13, 0x46, 0xeb, 0x50, 0x30, 0xbf, 0x4f, 0x05, 0xc3, + 0x93, 0x1c, 0x2f, 0x5b, 0x38, 0x8e, 0xfc, 0x4f, 0xf9, 0x88, 0xb9, 0x3c, 0xd5, 0x3d, 0x7c, 0x6a, + 0xbb, 0xee, 0x55, 0x4c, 0xa9, 0x3b, 0xbf, 0x0d, 0x45, 0x91, 0xe1, 0x85, 0x33, 0xbf, 0x7e, 0x1c, + 0x67, 0x0e, 0x66, 0x31, 0xef, 0x83, 0xc5, 0x1d, 0x74, 0x81, 0xd1, 0x87, 0x30, 0x4c, 0x1c, 0x15, + 0x4e, 0x6e, 0xbc, 0x75, 0x1c, 0x15, 0x7e, 0x5c, 0xf5, 0x0b, 0x55, 0x31, 0x26, 0x50, 0xd1, 0x5b, + 0x6c, 0xbd, 0x18, 0x2f, 0xfb, 0x08, 0xb4, 0xcb, 0x05, 0x9e, 0xae, 0x2e, 0x3a, 0xd3, 0xf6, 0x86, + 0x1f, 0x1f, 0x56, 0xc0, 0xff, 0x89, 0x83, 0x12, 0xf2, 0x2f, 0x25, 0x98, 0xe2, 0x2b, 0xd4, 0xea, + 0x59, 0x2a, 0xed, 0x9f, 0x5a, 0x62, 0x7a, 0x10, 0x4a, 0x4c, 0xaf, 0x0d, 0x58, 0x96, 0x98, 0x85, + 0xa9, 0xc9, 0xe9, 0x2b, 0x09, 0xce, 0xc5, 0xb8, 0x4f, 0x21, 0x2e, 0x6e, 0x85, 0xe3, 0xe2, 0x2b, + 0xc7, 0x9d, 0x50, 0x4a, 0x6c, 0xfc, 0xe7, 0xc9, 0x84, 0xe9, 0xf0, 0x93, 0x72, 0x03, 0xc0, 0xb4, + 0xd4, 0x7d, 0x55, 0x23, 0x1d, 0xf1, 0x08, 0x5e, 0x0a, 0xb4, 0x38, 0x79, 0x14, 0x1c, 0xe0, 0x42, + 0x36, 0xcc, 0xb6, 0xc9, 0x8e, 0xd2, 0xd3, 0xe8, 0x42, 0xbb, 0xbd, 0xa8, 0x98, 0xca, 0xb6, 0xaa, + 0xa9, 0x54, 0x15, 0xd7, 0x05, 0x23, 0xf5, 0x3b, 0xce, 0xe3, 0x74, 0x12, 0xc7, 0xe3, 0xc3, 0xca, + 0xc5, 0xa4, 0xd7, 0x21, 0x97, 0xa5, 0x8f, 0x53, 0xa0, 0x51, 0x1f, 0xca, 0x16, 0xf9, 0xb8, 0xa7, + 0x5a, 0xa4, 0xbd, 0x64, 0x19, 0x66, 0x48, 0x6d, 0x9e, 0xab, 0xfd, 0xeb, 0xa3, 0xc3, 0x4a, 0x19, + 0xa7, 0xf0, 0x0c, 0x56, 0x9c, 0x0a, 0x8f, 0x1e, 0xc2, 0xb4, 0xe2, 0x74, 0x86, 0x85, 0xb4, 0x3a, + 0xa7, 0xe4, 0xf6, 0xd1, 0x61, 0x65, 0x7a, 0x21, 0x4e, 0x1e, 0xac, 0x30, 0x09, 0x14, 0xd5, 0xa0, + 0xb8, 0xcf, 0xfb, 0xd6, 0xec, 0xf2, 0x10, 0xc7, 0x67, 0x89, 0xa0, 0xe8, 0xb4, 0xb2, 0x31, 0xcc, + 0xe1, 0xe5, 0x26, 0x3f, 0x7d, 0x2e, 0x17, 0xfb, 0xa0, 0x64, 0xb5, 0xa4, 0x38, 0xf1, 0xfc, 0xc6, + 0xb8, 0xe4, 0x47, 0xad, 0xfb, 0x3e, 0x09, 0x07, 0xf9, 0xd0, 0x07, 0x30, 0xb2, 0x2b, 0x6e, 0x25, + 0xec, 0x72, 0x31, 0x53, 0x12, 0x0e, 0xdd, 0x62, 0xd4, 0xa7, 0x84, 0x8a, 0x11, 0x77, 0xd8, 
0xc6, + 0x3e, 0x22, 0x7a, 0x09, 0x8a, 0xfc, 0xc7, 0xca, 0x12, 0xbf, 0x8e, 0x2b, 0xf9, 0xb1, 0xed, 0xbe, + 0x33, 0x8c, 0x5d, 0xba, 0xcb, 0xba, 0xd2, 0x58, 0xe4, 0xd7, 0xc2, 0x11, 0xd6, 0x95, 0xc6, 0x22, + 0x76, 0xe9, 0xe8, 0x23, 0x28, 0xda, 0x64, 0x55, 0xd5, 0x7b, 0x07, 0x65, 0xc8, 0xf4, 0xa8, 0xdc, + 0xbc, 0xcb, 0xb9, 0x23, 0x17, 0x63, 0xbe, 0x06, 0x41, 0xc7, 0x2e, 0x2c, 0xda, 0x85, 0x11, 0xab, + 0xa7, 0x2f, 0xd8, 0x5b, 0x36, 0xb1, 0xca, 0xa3, 0x5c, 0xc7, 0xa0, 0x70, 0x8e, 0x5d, 0xfe, 0xa8, + 0x16, 0x6f, 0x85, 0x3c, 0x0e, 0xec, 0x83, 0xa3, 0x7f, 0x93, 0x00, 0xd9, 0x3d, 0xd3, 0xd4, 0x48, + 0x97, 0xe8, 0x54, 0xd1, 0xf8, 0x5d, 0x9c, 0x5d, 0x3e, 0xcb, 0x75, 0xbe, 0x3d, 0x68, 0x5e, 0x31, + 0xc1, 0xa8, 0x72, 0xef, 0xd2, 0x3b, 0xce, 0x8a, 0x13, 0xf4, 0xb2, 0xa5, 0xdd, 0xb1, 0xf9, 0xdf, + 0xe5, 0xb1, 0x4c, 0x4b, 0x9b, 0x7c, 0xe7, 0xe8, 0x2f, 0xad, 0xa0, 0x63, 0x17, 0x16, 0x3d, 0x80, + 0x59, 0xb7, 0xed, 0x11, 0x1b, 0x06, 0x5d, 0x56, 0x35, 0x62, 0xf7, 0x6d, 0x4a, 0xba, 0xe5, 0x71, + 0xbe, 0xed, 0x5e, 0xef, 0x07, 0x4e, 0xe4, 0xc2, 0x29, 0xd2, 0xa8, 0x0b, 0x15, 0x37, 0x64, 0xb0, + 0xf3, 0xe4, 0xc5, 0xac, 0xbb, 0x76, 0x4b, 0xd1, 0x9c, 0x77, 0x80, 0x09, 0xae, 0xe0, 0xc5, 0xa3, + 0xc3, 0x4a, 0x65, 0xe9, 0xc9, 0xac, 0x78, 0x10, 0x16, 0x7a, 0x0f, 0xca, 0x4a, 0x9a, 0x9e, 0x49, + 0xae, 0xe7, 0x79, 0x16, 0x87, 0x52, 0x15, 0xa4, 0x4a, 0x23, 0x0a, 0x93, 0x4a, 0xb8, 0x01, 0xd5, + 0x2e, 0x4f, 0x65, 0xba, 0x88, 0x8c, 0xf4, 0xad, 0xfa, 0x97, 0x11, 0x11, 0x82, 0x8d, 0x63, 0x1a, + 0xd0, 0x3f, 0x00, 0x52, 0xa2, 0x3d, 0xb3, 0x76, 0x19, 0x65, 0x4a, 0x3f, 0xb1, 0x66, 0x5b, 0xdf, + 0xed, 0x62, 0x24, 0x1b, 0x27, 0xe8, 0x41, 0xab, 0x30, 0x23, 0x46, 0xb7, 0x74, 0x5b, 0xd9, 0x21, + 0xcd, 0xbe, 0xdd, 0xa2, 0x9a, 0x5d, 0x9e, 0xe6, 0xb1, 0x8f, 0x3f, 0x7c, 0x2d, 0x24, 0xd0, 0x71, + 0xa2, 0x14, 0x7a, 0x1b, 0x26, 0x77, 0x0c, 0x6b, 0x5b, 0x6d, 0xb7, 0x89, 0xee, 0x22, 0xcd, 0x70, + 0xa4, 0x19, 0xb6, 0x1a, 0xcb, 0x11, 0x1a, 0x8e, 0x71, 0x23, 0x1b, 0xce, 0x09, 0xe4, 0x86, 0x65, + 0xb4, 0xd6, 0x8c, 0x9e, 0x4e, 0x9d, 0x92, 0xe8, 0x9c, 0x97, 0x62, 0xce, 0x2d, 0x24, 0x31, 0x3c, + 0x3e, 0xac, 0x5c, 0x4a, 0xae, 0x80, 0x7d, 0x26, 0x9c, 0x8c, 0x8d, 0x76, 0x01, 0x78, 0x5c, 0x70, + 0x8e, 0xdf, 0x2c, 0x3f, 0x7e, 0xb7, 0xb3, 0x44, 0x9d, 0xc4, 0x13, 0xe8, 0x3c, 0xc9, 0x79, 0x64, + 0x1c, 0xc0, 0xe6, 0xbd, 0x32, 0xe2, 0xe5, 0xe4, 0x74, 0xfa, 0x8d, 0x8f, 0xd7, 0x2b, 0xe3, 0x9b, + 0xf6, 0xd4, 0x7a, 0x65, 0x02, 0x90, 0x4f, 0xbe, 0xab, 0xfd, 0x6d, 0x0e, 0xa6, 0x7d, 0xe6, 0xcc, + 0xbd, 0x32, 0x09, 0x22, 0x7f, 0xee, 0x39, 0x1e, 0xdc, 0x73, 0xfc, 0x85, 0x04, 0xe3, 0xfe, 0xd2, + 0xfd, 0xf1, 0xf5, 0xaf, 0xf8, 0xb6, 0xa5, 0x54, 0xd4, 0x3f, 0xca, 0x05, 0x27, 0xf0, 0x27, 0xdf, + 0x44, 0xf1, 0xfd, 0x1b, 0x85, 0xe5, 0xaf, 0xf2, 0x30, 0x19, 0x3d, 0x8d, 0xa1, 0xb7, 0x76, 0x69, + 0xe0, 0x5b, 0x7b, 0x03, 0x66, 0x76, 0x7a, 0x9a, 0xd6, 0xe7, 0xcb, 0x10, 0x78, 0x70, 0x77, 0xde, + 0xca, 0x9e, 0x17, 0x92, 0x33, 0xcb, 0x09, 0x3c, 0x38, 0x51, 0x32, 0xa5, 0x6f, 0x20, 0x7f, 0xa2, + 0xbe, 0x81, 0xd8, 0x33, 0x76, 0xe1, 0x18, 0xcf, 0xd8, 0x89, 0x3d, 0x00, 0x43, 0x27, 0xe8, 0x01, + 0x38, 0xc9, 0xa3, 0x7d, 0x42, 0x10, 0x1b, 0xd8, 0x43, 0xfa, 0x3c, 0x9c, 0x17, 0x62, 0x94, 0xbf, + 0xa7, 0xeb, 0xd4, 0x32, 0x34, 0x8d, 0x58, 0x4b, 0xbd, 0x6e, 0xb7, 0x2f, 0xbf, 0x09, 0xe3, 0xe1, + 0x4e, 0x11, 0x67, 0xa7, 0x9d, 0x66, 0x15, 0xf1, 0x62, 0x19, 0xd8, 0x69, 0x67, 0x1c, 0x7b, 0x1c, + 0xf2, 0xbf, 0x48, 0x30, 0x9b, 0xdc, 0x11, 0x8a, 0x34, 0x18, 0xef, 0x2a, 0x07, 0xc1, 0x2e, 0x5d, + 0xe9, 0x84, 0x77, 0x49, 0xbc, 0x45, 0x60, 0x2d, 0x84, 0x85, 0x23, 0xd8, 0xf2, 0x77, 0x12, 0xcc, + 0xa5, 0x3c, 0xce, 
0x9f, 0xae, 0x25, 0xe8, 0x7d, 0x28, 0x75, 0x95, 0x83, 0x66, 0xcf, 0xea, 0x90, + 0x13, 0xdf, 0x9e, 0xf1, 0x88, 0xb1, 0x26, 0x50, 0xb0, 0x87, 0x27, 0xff, 0x9f, 0x04, 0xcf, 0xa5, + 0x56, 0x14, 0xe8, 0x56, 0xa8, 0x8f, 0x40, 0x8e, 0xf4, 0x11, 0xa0, 0xb8, 0xe0, 0x33, 0x6a, 0x23, + 0xf8, 0x4c, 0x82, 0x72, 0xda, 0xd7, 0x16, 0xba, 0x19, 0x32, 0xf2, 0x85, 0x88, 0x91, 0x53, 0x31, + 0xb9, 0x67, 0x64, 0xe3, 0x0f, 0x25, 0x98, 0x4d, 0xfe, 0xea, 0x44, 0xaf, 0x86, 0x2c, 0xac, 0x44, + 0x2c, 0x9c, 0x88, 0x48, 0x09, 0xfb, 0x3e, 0x84, 0x71, 0xf1, 0x6d, 0x2a, 0x60, 0xc4, 0xde, 0xcb, + 0x49, 0x11, 0x5d, 0x40, 0xb8, 0x95, 0x20, 0xf7, 0xaa, 0xf0, 0x18, 0x8e, 0xa0, 0xc9, 0xff, 0x9a, + 0x83, 0xa1, 0x66, 0x4b, 0xd1, 0xc8, 0x29, 0x14, 0x83, 0xef, 0x84, 0x8a, 0xc1, 0x41, 0xff, 0xf7, + 0xc3, 0xad, 0x4a, 0xad, 0x03, 0x71, 0xa4, 0x0e, 0x7c, 0x39, 0x13, 0xda, 0x93, 0x4b, 0xc0, 0xbf, + 0x82, 0x11, 0x4f, 0xe9, 0xf1, 0x32, 0x93, 0xfc, 0xbf, 0x39, 0x18, 0x0d, 0xa8, 0x38, 0x66, 0x5e, + 0xdb, 0x09, 0xd5, 0x03, 0xf9, 0x0c, 0xe5, 0x7f, 0x40, 0x57, 0xd5, 0xad, 0x00, 0x9c, 0xbe, 0x55, + 0xbf, 0x53, 0x31, 0x5e, 0x18, 0xbc, 0x09, 0xe3, 0x54, 0xb1, 0x3a, 0x84, 0x7a, 0x37, 0xe3, 0x79, + 0xee, 0x8b, 0x5e, 0xb7, 0xf3, 0x66, 0x88, 0x8a, 0x23, 0xdc, 0xe7, 0xef, 0xc0, 0x58, 0x48, 0xd9, + 0xb1, 0xda, 0x4e, 0x7f, 0x22, 0xc1, 0x0b, 0x03, 0xef, 0x2d, 0x50, 0x3d, 0x74, 0x48, 0xaa, 0x91, + 0x43, 0x32, 0x9f, 0x0e, 0xf0, 0xec, 0xda, 0x97, 0xea, 0xd7, 0x1e, 0x7d, 0x3b, 0x7f, 0xe6, 0xeb, + 0x6f, 0xe7, 0xcf, 0x7c, 0xf3, 0xed, 0xfc, 0x99, 0x7f, 0x3c, 0x9a, 0x97, 0x1e, 0x1d, 0xcd, 0x4b, + 0x5f, 0x1f, 0xcd, 0x4b, 0xdf, 0x1c, 0xcd, 0x4b, 0xbf, 0x3e, 0x9a, 0x97, 0xfe, 0xf3, 0xbb, 0xf9, + 0x33, 0xef, 0x17, 0x05, 0xdc, 0x1f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x51, 0x7e, 0x9f, 0x62, 0x14, + 0x3c, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 1a9b7ebb194b4..efcda7ebdea58 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -22,7 +22,6 @@ syntax = 'proto2'; package k8s.io.api.extensions.v1beta1; import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -45,7 +44,7 @@ message AllowedHostPath { // pathPrefix is the path prefix that the host volume must match. // It does not support `*`. // Trailing slashes are trimmed when validating the path prefix with a host path. - // + // // Examples: // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` // `/foo` would not allow `/food` or `/etc/foo` @@ -56,31 +55,6 @@ message AllowedHostPath { optional bool readOnly = 2; } -message CustomMetricCurrentStatus { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricCurrentStatusList { - repeated CustomMetricCurrentStatus items = 1; -} - -// Alpha-level support for Custom Metrics in HPA (as annotations). -message CustomMetricTarget { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). 
- optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricTargetList { - repeated CustomMetricTarget items = 1; -} - // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for // more information. // DaemonSet represents the configuration of a daemon set. @@ -335,6 +309,8 @@ message DeploymentSpec { // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. + // This is set to the max value of int32 (i.e. 2147483647) by default, which + // means "retaining all old RelicaSets". // +optional optional int32 revisionHistoryLimit = 6; @@ -688,7 +664,7 @@ message NetworkPolicyList { message NetworkPolicyPeer { // This is a label selector which selects Pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. - // + // // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. @@ -697,7 +673,7 @@ message NetworkPolicyPeer { // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. - // + // // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. @@ -847,6 +823,12 @@ message PodSecurityPolicySpec { // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. optional RunAsUserStrategyOptions runAsUser = 11; + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + optional RunAsGroupStrategyOptions runAsGroup = 22; + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. optional SupplementalGroupsStrategyOptions supplementalGroups = 12; @@ -886,7 +868,7 @@ message PodSecurityPolicySpec { // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // + // // Examples: // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. @@ -896,7 +878,7 @@ message PodSecurityPolicySpec { // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // + // // Examples: // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. @@ -1086,6 +1068,18 @@ message RollingUpdateDeployment { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. 
+message RunAsGroupStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. // Deprecated: use RunAsUserStrategyOptions from policy API Group instead. message RunAsUserStrategyOptions { diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index 38e112d1e0e7d..5ba6f95854528 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -19,7 +19,6 @@ package v1beta1 import ( appsv1beta1 "k8s.io/api/apps/v1beta1" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -50,8 +49,6 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } -// +genclient -// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // represents a scaling request for a resource. @@ -77,29 +74,6 @@ type ReplicationControllerDummy struct { metav1.TypeMeta `json:",inline"` } -// Alpha-level support for Custom Metrics in HPA (as annotations). -type CustomMetricTarget struct { - // Custom Metric name. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Custom Metric value (average). - TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -type CustomMetricTargetList struct { - Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"` -} - -type CustomMetricCurrentStatus struct { - // Custom Metric name. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Custom Metric value (average). - CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -type CustomMetricCurrentStatusList struct { - Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` -} - // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale @@ -151,6 +125,8 @@ type DeploymentSpec struct { // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. + // This is set to the max value of int32 (i.e. 2147483647) by default, which + // means "retaining all old RelicaSets". // +optional RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` @@ -918,6 +894,11 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. 
+ // +optional + RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -1072,6 +1053,17 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. +type RunAsGroupStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + // IDRange provides a min/max of an allowed range of IDs. // Deprecated: use IDRange from policy API Group instead. type IDRange struct { @@ -1098,6 +1090,23 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) +// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a +// Security Context. +// Deprecated: use RunAsGroupStrategy from policy API Group instead. +type RunAsGroupStrategy string + +const ( + // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when RunAsGroup are specified, they have to fall in the defined range. + RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" + // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. + // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. + RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" + // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. + // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. + RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" +) + // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. // Deprecated: use FSGroupStrategyOptions from policy API Group instead. 
type FSGroupStrategyOptions struct { diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index cdbc490a5ef5d..bce6036cd5d21 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -46,25 +46,6 @@ func (AllowedHostPath) SwaggerDoc() map[string]string { return map_AllowedHostPath } -var map_CustomMetricCurrentStatus = map[string]string{ - "name": "Custom Metric name.", - "value": "Custom Metric value (average).", -} - -func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string { - return map_CustomMetricCurrentStatus -} - -var map_CustomMetricTarget = map[string]string{ - "": "Alpha-level support for Custom Metrics in HPA (as annotations).", - "name": "Custom Metric name.", - "value": "Custom Metric value (average).", -} - -func (CustomMetricTarget) SwaggerDoc() map[string]string { - return map_CustomMetricTarget -} - var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -114,7 +95,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", @@ -193,7 +174,7 @@ var map_DeploymentSpec = map[string]string{ "template": "Template describes the pods that will be created.", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"retaining all old RelicaSets\".", "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.", "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. 
The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"no deadline\".", @@ -472,6 +453,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", @@ -584,6 +566,16 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } +var map_RunAsGroupStrategyOptions = map[string]string{ + "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", + "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsGroupStrategyOptions +} + var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 65801c23edbd7..8128c079b482b 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -59,86 +59,6 @@ func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) { - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus. 
-func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList. -func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) { - *out = *in - out.TargetValue = in.TargetValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget. -func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget { - if in == nil { - return nil - } - out := new(CustomMetricTarget) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricTarget, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList. -func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList { - if in == nil { - return nil - } - out := new(CustomMetricTargetList) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { *out = *in @@ -1102,6 +1022,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -1368,6 +1293,27 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. 
+func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go index 3b4840ad64e98..598dff2a82a30 100644 --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=imagepolicy.k8s.io + package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1" diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go index 6e08dcca84bf9..2a4529bfca976 100644 --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. @@ -250,24 +249,6 @@ func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -728,51 +709,14 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift 
>= 64 { return ErrIntOverflowGenerated @@ -782,41 +726,80 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -972,51 +955,14 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - 
iNdEx = postStringIndexmapkey if m.AuditAnnotations == nil { m.AuditAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1026,41 +972,80 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AuditAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.AuditAnnotations[mapkey] = mapvalue } + m.AuditAnnotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index ef9ae2ae4cf48..887c366479657 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io + package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 7b1c04b29a5a3..86bd80c857b87 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -446,24 +445,6 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index 4e068d08f03c6..ab3731e9c3994 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -114,7 +114,7 @@ message NetworkPolicyList { message NetworkPolicyPeer { // This is a label selector which selects Pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. - // + // // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. @@ -123,7 +123,7 @@ message NetworkPolicyPeer { // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. - // + // // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 9c456f9237fe9..74611c6ba5d1c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -15,9 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, // NetworkPolicy, etc. 
-// +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index d7d62dd3ab8b1..d122fcfda12be 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -38,6 +37,7 @@ limitations under the License. PodSecurityPolicy PodSecurityPolicyList PodSecurityPolicySpec + RunAsGroupStrategyOptions RunAsUserStrategyOptions SELinuxStrategyOptions SupplementalGroupsStrategyOptions @@ -125,20 +125,26 @@ func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPol func (*PodSecurityPolicySpec) ProtoMessage() {} func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } +func (*RunAsGroupStrategyOptions) ProtoMessage() {} +func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{13} +} + func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptorGenerated, []int{14} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptorGenerated, []int{16} } func init() { @@ -155,6 +161,7 @@ func init() { proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") @@ -853,6 +860,52 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.RunAsGroup != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) + n18, err := m.RunAsGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func (m 
*RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -913,11 +966,11 @@ func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n18, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n19, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } return i, nil } @@ -956,24 +1009,6 @@ func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1212,6 +1247,24 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RunAsGroupStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1441,6 +1494,18 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, + `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RunAsGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2537,51 +2602,14 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } 
- } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.DisruptedPods == nil { m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2591,46 +2619,85 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.DisruptedPods[mapkey] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_apis_meta_v1.Time - m.DisruptedPods[mapkey] = mapvalue } 
+ m.DisruptedPods[mapkey] = *mapvalue iNdEx = postIndex case 3: if wireType != 0 { @@ -3537,6 +3604,149 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4000,113 +4210,115 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1715 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x9a, 0x92, 0x48, 0x8d, 0x24, 0x5a, 0x1a, 0xfd, 0xe9, 0x46, 0xa8, 0xb9, 0x0e, 0x03, - 0x14, 0x6e, 0x90, 0x2c, 0x63, 0x39, 0x69, 0x8d, 0xa6, 0x2d, 0xa2, 0x35, 0x25, 0x5b, 0x81, 0x55, - 0xb1, 0x43, 0x3b, 0x68, 0x0b, 0xb7, 0xe8, 0x70, 0x77, 0x44, 0x4e, 0xb4, 0xdc, 0xdd, 0xce, 0xcc, - 0x32, 0xe4, 0xad, 0x87, 0x1e, 0x7a, 0xec, 0x17, 0xc8, 0x27, 0x28, 0x7a, 0xea, 0x97, 0x50, 0x81, - 0xa2, 0xc8, 0x31, 0xe8, 0x81, 0xa8, 0x59, 0xf4, 0x4b, 0xf8, 0xd2, 0x60, 0x87, 0xb3, 0x24, 0xf7, - 0x0f, 0x29, 0x2b, 0x40, 0x7c, 0xdb, 0x9d, 0xf7, 0xfb, 0xfd, 0xde, 0x9b, 0x37, 0x6f, 0xde, 0xce, - 0x0e, 0xb0, 0x2e, 0x1f, 0x72, 0x93, 0xfa, 0xb5, 0xcb, 0xb0, 0x45, 0x98, 0x47, 0x04, 0xe1, 0xb5, - 0x1e, 0xf1, 0x1c, 0x9f, 0xd5, 0x94, 0x01, 0x07, 0xb4, 0x16, 0xf8, 0x2e, 0xb5, 0x07, 0xb5, 0xde, - 0xfd, 0x16, 0x11, 0xf8, 0x7e, 0xad, 0x4d, 0x3c, 0xc2, 0xb0, 0x20, 0x8e, 0x19, 0x30, 0x5f, 0xf8, - 0xf0, 0xad, 0x31, 0xd4, 0xc4, 0x01, 0x35, 0xc7, 0x50, 0x53, 0x41, 0x0f, 0xde, 0x6f, 0x53, 0xd1, - 0x09, 0x5b, 0xa6, 0xed, 0x77, 0x6b, 0x6d, 0xbf, 0xed, 0xd7, 0x24, 0xa3, 0x15, 0x5e, 0xc8, 0x37, - 0xf9, 0x22, 0x9f, 0xc6, 0x4a, 0x07, 0xd5, 0x19, 0xa7, 0xb6, 0xcf, 0x48, 0xad, 0x97, 0xf1, 0x76, - 0xf0, 0xe1, 0x14, 0xd3, 0xc5, 0x76, 0x87, 0x7a, 0x84, 0x0d, 0x6a, 0xc1, 0x65, 0x3b, 0x1a, 0xe0, - 0xb5, 0x2e, 0x11, 0x38, 0x8f, 0x55, 0x9b, 0xc7, 0x62, 0xa1, 0x27, 0x68, 0x97, 0x64, 0x08, 0x3f, - 0xba, 0x8e, 0xc0, 0xed, 0x0e, 0xe9, 0xe2, 0x0c, 0xef, 0xc1, 0x3c, 0x5e, 0x28, 0xa8, 0x5b, 0xa3, - 0x9e, 0xe0, 0x82, 0xa5, 0x49, 0xd5, 0x8f, 0xc1, 0xf6, 0x91, 0xeb, 0xfa, 0x5f, 0x10, 0xe7, 0xc4, - 0x25, 0xfd, 0xcf, 0x7c, 0x37, 0xec, 0x12, 0xf8, 0x03, 0xb0, 0xea, 0x30, 0xda, 0x23, 0x4c, 0xd7, - 0xee, 0x6a, 0xf7, 0xd6, 0xac, 0xf2, 0xd5, 0xd0, 0x58, 0x1a, 0x0d, 0x8d, 0xd5, 0xba, 0x1c, 0x45, - 0xca, 0x5a, 0xe5, 0xe0, 0xb6, 0x22, 0x3f, 0xf1, 0xb9, 0x68, 0x60, 0xd1, 0x81, 0x87, 0x00, 0x04, - 0x58, 0x74, 0x1a, 0x8c, 0x5c, 0xd0, 0xbe, 0xa2, 0x43, 0x45, 0x07, 0x8d, 0x89, 0x05, 0xcd, 0xa0, - 0xe0, 0x7b, 0xa0, 0xc4, 0x08, 0x76, 0xce, 0x3d, 0x77, 0xa0, 0xdf, 0xba, 0xab, 0xdd, 0x2b, 0x59, - 0x5b, 0x8a, 0x51, 0x42, 0x6a, 0x1c, 0x4d, 0x10, 0xd5, 0x7f, 0x6b, 0xa0, 0x74, 0xdc, 0xa3, 0xb6, - 0xa0, 0xbe, 0x07, 0x7f, 0x0f, 0x4a, 0x51, 0xde, 0x1d, 0x2c, 0xb0, 0x74, 0xb6, 0x7e, 0xf8, 0x81, - 0x39, 0xad, 0x89, 0x49, 0x1a, 0xcc, 0xe0, 0xb2, 0x1d, 0x0d, 0x70, 0x33, 0x42, 0x9b, 0xbd, 0xfb, - 0xe6, 0x79, 0xeb, 0x73, 0x62, 0x8b, 0x33, 0x22, 0xf0, 0x34, 0xbc, 0xe9, 0x18, 0x9a, 0xa8, 0x42, - 0x17, 0x6c, 0x3a, 0xc4, 0x25, 0x82, 0x9c, 0x07, 0x91, 0x47, 0x2e, 0x23, 0x5c, 0x3f, 0x7c, 0xf0, - 0x7a, 0x6e, 0xea, 0xb3, 0x54, 0x6b, 0x7b, 0x34, 0x34, 0x36, 0x13, 0x43, 0x28, 0x29, 0x5e, 0xfd, - 0x52, 0x03, 0xfb, 0x27, 0xcd, 0xc7, 0xcc, 0x0f, 0x83, 0xa6, 0x88, 0xd6, 0xa9, 0x3d, 0x50, 0x26, - 0xf8, 0x63, 0xb0, 0xcc, 0x42, 0x97, 0xa8, 0x9c, 0xbe, 0xa3, 0x82, 0x5e, 0x46, 0xa1, 0x4b, 0x5e, - 0x0d, 0x8d, 0x9d, 0x14, 0xeb, 0xd9, 0x20, 0x20, 0x48, 0x12, 0xe0, 0xa7, 0x60, 0x95, 0x61, 0xaf, - 0x4d, 0xa2, 0xd0, 0x0b, 0xf7, 0xd6, 0x0f, 0xab, 0xe6, 0xdc, 0x5d, 0x63, 0x9e, 0xd6, 0x51, 0x04, - 0x9d, 0xae, 0xb8, 0x7c, 0xe5, 0x48, 0x29, 0x54, 0xcf, 0xc0, 0xa6, 0x5c, 0x6a, 0x9f, 0x09, 0x69, - 0x81, 0x77, 0x40, 0xa1, 0x4b, 0x3d, 0x19, 0xd4, 0x8a, 0xb5, 0xae, 0x58, 0x85, 0x33, 0xea, 0xa1, - 0x68, 0x5c, 0x9a, 0x71, 0x5f, 0xe6, 0x6c, 0xd6, 0x8c, 0xfb, 0x28, 0x1a, 0xaf, 0x3e, 0x06, 0x45, - 0xe5, 0x71, 0x56, 0xa8, 0xb0, 0x58, 0xa8, 0x90, 0x23, 0xf4, 0xd7, 0x5b, 0x60, 0xa7, 0xe1, 0x3b, - 
0x75, 0xca, 0x59, 0x28, 0xf3, 0x65, 0x85, 0x4e, 0x9b, 0x88, 0x37, 0x50, 0x1f, 0xcf, 0xc0, 0x32, - 0x0f, 0x88, 0xad, 0xca, 0xe2, 0x70, 0x41, 0x6e, 0x73, 0xe2, 0x6b, 0x06, 0xc4, 0xb6, 0x36, 0xe2, - 0xa5, 0x8c, 0xde, 0x90, 0x54, 0x83, 0x2f, 0xc0, 0x2a, 0x17, 0x58, 0x84, 0x5c, 0x2f, 0x48, 0xdd, - 0x0f, 0x6f, 0xa8, 0x2b, 0xb9, 0xd3, 0x55, 0x1c, 0xbf, 0x23, 0xa5, 0x59, 0xfd, 0xa7, 0x06, 0xbe, - 0x97, 0xc3, 0x7a, 0x4a, 0xb9, 0x80, 0x2f, 0x32, 0x19, 0x33, 0x5f, 0x2f, 0x63, 0x11, 0x5b, 0xe6, - 0x6b, 0xb2, 0x79, 0xe3, 0x91, 0x99, 0x6c, 0x35, 0xc1, 0x0a, 0x15, 0xa4, 0x1b, 0x97, 0xa2, 0x79, - 0xb3, 0x69, 0x59, 0x9b, 0x4a, 0x7a, 0xe5, 0x34, 0x12, 0x41, 0x63, 0xad, 0xea, 0xbf, 0x6e, 0xe5, - 0x4e, 0x27, 0x4a, 0x27, 0xbc, 0x00, 0x1b, 0x5d, 0xea, 0x1d, 0xf5, 0x30, 0x75, 0x71, 0x4b, 0xed, - 0x9e, 0x45, 0x45, 0x10, 0xf5, 0x4a, 0x73, 0xdc, 0x2b, 0xcd, 0x53, 0x4f, 0x9c, 0xb3, 0xa6, 0x60, - 0xd4, 0x6b, 0x5b, 0x5b, 0xa3, 0xa1, 0xb1, 0x71, 0x36, 0xa3, 0x84, 0x12, 0xba, 0xf0, 0xb7, 0xa0, - 0xc4, 0x89, 0x4b, 0x6c, 0xe1, 0xb3, 0x9b, 0x75, 0x88, 0xa7, 0xb8, 0x45, 0xdc, 0xa6, 0xa2, 0x5a, - 0x1b, 0x51, 0xde, 0xe2, 0x37, 0x34, 0x91, 0x84, 0x2e, 0x28, 0x77, 0x71, 0xff, 0xb9, 0x87, 0x27, - 0x13, 0x29, 0x7c, 0xcb, 0x89, 0xc0, 0xd1, 0xd0, 0x28, 0x9f, 0x25, 0xb4, 0x50, 0x4a, 0xbb, 0xfa, - 0xbf, 0x65, 0xf0, 0xd6, 0xdc, 0xaa, 0x82, 0x9f, 0x02, 0xe8, 0xb7, 0x38, 0x61, 0x3d, 0xe2, 0x3c, - 0x1e, 0x7f, 0x4d, 0xa8, 0x1f, 0x6f, 0xdc, 0x03, 0xb5, 0x40, 0xf0, 0x3c, 0x83, 0x40, 0x39, 0x2c, - 0xf8, 0x27, 0x0d, 0x6c, 0x3a, 0x63, 0x37, 0xc4, 0x69, 0xf8, 0x4e, 0x5c, 0x18, 0x8f, 0xbf, 0x4d, - 0xbd, 0x9b, 0xf5, 0x59, 0xa5, 0x63, 0x4f, 0xb0, 0x81, 0xb5, 0xa7, 0x02, 0xda, 0x4c, 0xd8, 0x50, - 0xd2, 0x29, 0x3c, 0x03, 0xd0, 0x99, 0x48, 0x72, 0xf5, 0x4d, 0x93, 0x29, 0x5e, 0xb1, 0xee, 0x28, - 0x85, 0xbd, 0x84, 0xdf, 0x18, 0x84, 0x72, 0x88, 0xf0, 0xe7, 0xa0, 0x6c, 0x87, 0x8c, 0x11, 0x4f, - 0x3c, 0x21, 0xd8, 0x15, 0x9d, 0x81, 0xbe, 0x2c, 0xa5, 0xf6, 0x95, 0x54, 0xf9, 0x51, 0xc2, 0x8a, - 0x52, 0xe8, 0x88, 0xef, 0x10, 0x4e, 0x19, 0x71, 0x62, 0xfe, 0x4a, 0x92, 0x5f, 0x4f, 0x58, 0x51, - 0x0a, 0x0d, 0x1f, 0x82, 0x0d, 0xd2, 0x0f, 0x88, 0x1d, 0xe7, 0x74, 0x55, 0xb2, 0x77, 0x15, 0x7b, - 0xe3, 0x78, 0xc6, 0x86, 0x12, 0xc8, 0x03, 0x17, 0xc0, 0x6c, 0x12, 0xe1, 0x16, 0x28, 0x5c, 0x92, - 0xc1, 0xf8, 0xcb, 0x83, 0xa2, 0x47, 0xf8, 0x09, 0x58, 0xe9, 0x61, 0x37, 0x24, 0xaa, 0xd6, 0xdf, - 0x7d, 0xbd, 0x5a, 0x7f, 0x46, 0xbb, 0x04, 0x8d, 0x89, 0x3f, 0xb9, 0xf5, 0x50, 0xab, 0xfe, 0x43, - 0x03, 0xdb, 0x0d, 0xdf, 0x69, 0x12, 0x3b, 0x64, 0x54, 0x0c, 0x1a, 0x72, 0x9d, 0xdf, 0x40, 0xcf, - 0x46, 0x89, 0x9e, 0xfd, 0xc1, 0xe2, 0x5a, 0x4b, 0x46, 0x37, 0xaf, 0x63, 0x57, 0xaf, 0x34, 0xb0, - 0x97, 0x41, 0xbf, 0x81, 0x8e, 0xfa, 0xcb, 0x64, 0x47, 0x7d, 0xef, 0x26, 0x93, 0x99, 0xd3, 0x4f, - 0xff, 0x5f, 0xce, 0x99, 0x8a, 0xec, 0xa6, 0xd1, 0xe9, 0x8e, 0xd1, 0x1e, 0x75, 0x49, 0x9b, 0x38, - 0x72, 0x32, 0xa5, 0x99, 0xd3, 0xdd, 0xc4, 0x82, 0x66, 0x50, 0x90, 0x83, 0x7d, 0x87, 0x5c, 0xe0, - 0xd0, 0x15, 0x47, 0x8e, 0xf3, 0x08, 0x07, 0xb8, 0x45, 0x5d, 0x2a, 0xa8, 0x3a, 0x8e, 0xac, 0x59, - 0x1f, 0x8f, 0x86, 0xc6, 0x7e, 0x3d, 0x17, 0xf1, 0x6a, 0x68, 0xdc, 0xc9, 0x9e, 0xcb, 0xcd, 0x09, - 0x64, 0x80, 0xe6, 0x48, 0xc3, 0x01, 0xd0, 0x19, 0xf9, 0x43, 0x18, 0x6d, 0x8a, 0x3a, 0xf3, 0x83, - 0x84, 0xdb, 0x82, 0x74, 0xfb, 0xb3, 0xd1, 0xd0, 0xd0, 0xd1, 0x1c, 0xcc, 0xf5, 0x8e, 0xe7, 0xca, - 0xc3, 0xcf, 0xc1, 0x0e, 0x1e, 0xf7, 0x81, 0x84, 0xd7, 0x65, 0xe9, 0xf5, 0xe1, 0x68, 0x68, 0xec, - 0x1c, 0x65, 0xcd, 0xd7, 0x3b, 0xcc, 0x13, 0x85, 0x35, 0x50, 0xec, 0xc9, 0x23, 0x3b, 0xd7, 0x57, - 0xa4, 0xfe, 0xde, 0x68, 
0x68, 0x14, 0xc7, 0xa7, 0xf8, 0x48, 0x73, 0xf5, 0xa4, 0x29, 0x0f, 0x82, - 0x31, 0x0a, 0x7e, 0x04, 0xd6, 0x3b, 0x3e, 0x17, 0xbf, 0x20, 0xe2, 0x0b, 0x9f, 0x5d, 0xca, 0xc6, - 0x50, 0xb2, 0x76, 0xd4, 0x0a, 0xae, 0x3f, 0x99, 0x9a, 0xd0, 0x2c, 0x0e, 0xfe, 0x1a, 0xac, 0x75, - 0xd4, 0xb1, 0x8f, 0xeb, 0x45, 0x59, 0x68, 0xf7, 0x16, 0x14, 0x5a, 0xe2, 0x88, 0x68, 0x6d, 0x2b, - 0xf9, 0xb5, 0x78, 0x98, 0xa3, 0xa9, 0x1a, 0xfc, 0x21, 0x28, 0xca, 0x97, 0xd3, 0xba, 0x5e, 0x92, - 0xd1, 0xdc, 0x56, 0xf0, 0xe2, 0x93, 0xf1, 0x30, 0x8a, 0xed, 0x31, 0xf4, 0xb4, 0xf1, 0x48, 0x5f, - 0xcb, 0x42, 0x4f, 0x1b, 0x8f, 0x50, 0x6c, 0x87, 0x2f, 0x40, 0x91, 0x93, 0xa7, 0xd4, 0x0b, 0xfb, - 0x3a, 0x90, 0x5b, 0xee, 0xfe, 0x82, 0x70, 0x9b, 0xc7, 0x12, 0x99, 0x3a, 0x70, 0x4f, 0xd5, 0x95, - 0x1d, 0xc5, 0x92, 0xd0, 0x01, 0x6b, 0x2c, 0xf4, 0x8e, 0xf8, 0x73, 0x4e, 0x98, 0xbe, 0x9e, 0xf9, - 0xda, 0xa7, 0xf5, 0x51, 0x8c, 0x4d, 0x7b, 0x98, 0x64, 0x66, 0x82, 0x40, 0x53, 0x61, 0xf8, 0x67, - 0x0d, 0x40, 0x1e, 0x06, 0x81, 0x4b, 0xba, 0xc4, 0x13, 0xd8, 0x95, 0xe7, 0x7b, 0xae, 0x6f, 0x48, - 0x7f, 0x3f, 0x5d, 0x34, 0x9f, 0x0c, 0x29, 0xed, 0x78, 0xf2, 0x99, 0xce, 0x42, 0x51, 0x8e, 0xcf, - 0x28, 0x9d, 0x17, 0x5c, 0x3e, 0xeb, 0x9b, 0xd7, 0xa6, 0x33, 0xff, 0xff, 0x65, 0x9a, 0x4e, 0x65, - 0x47, 0xb1, 0x24, 0xfc, 0x0c, 0xec, 0xc7, 0x7f, 0x77, 0xc8, 0xf7, 0xc5, 0x09, 0x75, 0x09, 0x1f, - 0x70, 0x41, 0xba, 0x7a, 0x59, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x51, 0x2e, 0x0a, 0xcd, 0x61, 0xc3, - 0x2e, 0x30, 0xe2, 0xf6, 0x10, 0xed, 0x9d, 0x49, 0x7f, 0x3a, 0xe6, 0x36, 0x76, 0xc7, 0xa7, 0x96, - 0xdb, 0xd2, 0xc1, 0x3b, 0xa3, 0xa1, 0x61, 0xd4, 0x17, 0x43, 0xd1, 0x75, 0x5a, 0xf0, 0x57, 0x40, - 0xc7, 0xf3, 0xfc, 0x6c, 0x49, 0x3f, 0xdf, 0x8f, 0x7a, 0xce, 0x5c, 0x07, 0x73, 0xd9, 0x30, 0x00, - 0x5b, 0x38, 0xf9, 0x9f, 0xcd, 0xf5, 0x6d, 0xb9, 0x0b, 0xdf, 0x5d, 0xb0, 0x0e, 0xa9, 0x5f, 0x73, - 0x4b, 0x57, 0x69, 0xdc, 0x4a, 0x19, 0x38, 0xca, 0xa8, 0xc3, 0x3e, 0x80, 0x38, 0x7d, 0x2d, 0xc0, - 0x75, 0x78, 0xed, 0x27, 0x26, 0x73, 0x97, 0x30, 0x2d, 0xb5, 0x8c, 0x89, 0xa3, 0x1c, 0x1f, 0xf0, - 0x29, 0xd8, 0x55, 0xa3, 0xcf, 0x3d, 0x8e, 0x2f, 0x48, 0x73, 0xc0, 0x6d, 0xe1, 0x72, 0x7d, 0x47, - 0xf6, 0x37, 0x7d, 0x34, 0x34, 0x76, 0x8f, 0x72, 0xec, 0x28, 0x97, 0x05, 0x3f, 0x01, 0x5b, 0x17, - 0x3e, 0x6b, 0x51, 0xc7, 0x21, 0x5e, 0xac, 0xb4, 0x2b, 0x95, 0x76, 0xa3, 0x4c, 0x9c, 0xa4, 0x6c, - 0x28, 0x83, 0x86, 0x1c, 0xec, 0x29, 0xe5, 0x06, 0xf3, 0xed, 0x33, 0x3f, 0xf4, 0x44, 0xd4, 0x52, - 0xb9, 0xbe, 0x37, 0xf9, 0x8c, 0xec, 0x1d, 0xe5, 0x01, 0x5e, 0x0d, 0x8d, 0xbb, 0x39, 0x2d, 0x3d, - 0x01, 0x42, 0xf9, 0xda, 0xd5, 0x2f, 0x35, 0xa0, 0xcf, 0xeb, 0x1a, 0xf0, 0xa3, 0xc4, 0x45, 0xc0, - 0xdb, 0xa9, 0x8b, 0x80, 0xed, 0x0c, 0xef, 0x3b, 0xb8, 0x06, 0xf8, 0x9b, 0x06, 0xf6, 0xf3, 0xbb, - 0x26, 0x7c, 0x90, 0x88, 0xce, 0x48, 0x45, 0x77, 0x3b, 0xc5, 0x52, 0xb1, 0xfd, 0x0e, 0x94, 0x55, - 0x6f, 0x4d, 0xde, 0xb2, 0x24, 0x62, 0x8c, 0x32, 0x18, 0x1d, 0x8b, 0x94, 0x44, 0xdc, 0x57, 0xe4, - 0x0f, 0x4d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa, 0x7f, 0xd7, 0xc0, 0xdb, 0xd7, 0x76, 0x45, 0x68, 0x25, - 0x42, 0x37, 0x53, 0xa1, 0x57, 0xe6, 0x0b, 0x7c, 0x37, 0x97, 0x2d, 0xd6, 0xfb, 0x57, 0x2f, 0x2b, - 0x4b, 0x5f, 0xbd, 0xac, 0x2c, 0x7d, 0xfd, 0xb2, 0xb2, 0xf4, 0xc7, 0x51, 0x45, 0xbb, 0x1a, 0x55, - 0xb4, 0xaf, 0x46, 0x15, 0xed, 0xeb, 0x51, 0x45, 0xfb, 0xcf, 0xa8, 0xa2, 0xfd, 0xe5, 0xbf, 0x95, - 0xa5, 0xdf, 0x14, 0x95, 0xdc, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xba, 0x23, 0xa4, 0x51, - 0x15, 0x00, 0x00, + // 1756 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 
0x8e, 0xdb, 0xc6, + 0x15, 0x5e, 0x5a, 0xfb, 0xa3, 0x9d, 0xfd, 0x9f, 0xfd, 0x29, 0xbd, 0xa8, 0x45, 0x47, 0x01, 0x0a, + 0x37, 0x48, 0xa8, 0x78, 0x9d, 0xa4, 0x46, 0xd3, 0x16, 0x59, 0x5a, 0xbb, 0xf6, 0x06, 0xde, 0xae, + 0x3a, 0xb2, 0x83, 0xb6, 0x70, 0x8b, 0x8e, 0xc4, 0x59, 0xed, 0x64, 0x29, 0x92, 0x9d, 0x19, 0x2a, + 0xab, 0xbb, 0x5e, 0xf4, 0xa2, 0x97, 0x7d, 0x81, 0xa0, 0x0f, 0x50, 0xf4, 0xaa, 0x2f, 0xe1, 0x02, + 0x45, 0x91, 0xcb, 0xa0, 0x17, 0x42, 0xad, 0x22, 0x2f, 0xe1, 0xab, 0x80, 0xa3, 0x21, 0x25, 0xfe, + 0x49, 0x5e, 0x03, 0xf6, 0x1d, 0x39, 0xe7, 0xfb, 0xbe, 0x73, 0xe6, 0xcc, 0x99, 0x33, 0x43, 0x02, + 0xeb, 0xf2, 0x3e, 0x37, 0xa9, 0x57, 0xbb, 0x0c, 0x5a, 0x84, 0xb9, 0x44, 0x10, 0x5e, 0xeb, 0x11, + 0xd7, 0xf6, 0x58, 0x4d, 0x19, 0xb0, 0x4f, 0x6b, 0xbe, 0xe7, 0xd0, 0x76, 0xbf, 0xd6, 0xbb, 0xdb, + 0x22, 0x02, 0xdf, 0xad, 0x75, 0x88, 0x4b, 0x18, 0x16, 0xc4, 0x36, 0x7d, 0xe6, 0x09, 0x0f, 0xde, + 0x1c, 0x41, 0x4d, 0xec, 0x53, 0x73, 0x04, 0x35, 0x15, 0x74, 0xff, 0x83, 0x0e, 0x15, 0x17, 0x41, + 0xcb, 0x6c, 0x7b, 0xdd, 0x5a, 0xc7, 0xeb, 0x78, 0x35, 0xc9, 0x68, 0x05, 0xe7, 0xf2, 0x4d, 0xbe, + 0xc8, 0xa7, 0x91, 0xd2, 0x7e, 0x75, 0xc2, 0x69, 0xdb, 0x63, 0xa4, 0xd6, 0xcb, 0x78, 0xdb, 0xff, + 0x68, 0x8c, 0xe9, 0xe2, 0xf6, 0x05, 0x75, 0x09, 0xeb, 0xd7, 0xfc, 0xcb, 0x4e, 0x38, 0xc0, 0x6b, + 0x5d, 0x22, 0x70, 0x1e, 0xab, 0x56, 0xc4, 0x62, 0x81, 0x2b, 0x68, 0x97, 0x64, 0x08, 0x9f, 0xcc, + 0x22, 0xf0, 0xf6, 0x05, 0xe9, 0xe2, 0x0c, 0xef, 0x5e, 0x11, 0x2f, 0x10, 0xd4, 0xa9, 0x51, 0x57, + 0x70, 0xc1, 0xd2, 0xa4, 0xea, 0xa7, 0x60, 0xeb, 0xd0, 0x71, 0xbc, 0xaf, 0x88, 0x7d, 0xec, 0x90, + 0xab, 0x2f, 0x3c, 0x27, 0xe8, 0x12, 0xf8, 0x23, 0xb0, 0x68, 0x33, 0xda, 0x23, 0x4c, 0xd7, 0x6e, + 0x6b, 0x77, 0x96, 0xad, 0xf5, 0xe7, 0x03, 0x63, 0x6e, 0x38, 0x30, 0x16, 0xeb, 0x72, 0x14, 0x29, + 0x6b, 0x95, 0x83, 0x0d, 0x45, 0x7e, 0xe4, 0x71, 0xd1, 0xc0, 0xe2, 0x02, 0x1e, 0x00, 0xe0, 0x63, + 0x71, 0xd1, 0x60, 0xe4, 0x9c, 0x5e, 0x29, 0x3a, 0x54, 0x74, 0xd0, 0x88, 0x2d, 0x68, 0x02, 0x05, + 0xdf, 0x07, 0x65, 0x46, 0xb0, 0x7d, 0xe6, 0x3a, 0x7d, 0xfd, 0xc6, 0x6d, 0xed, 0x4e, 0xd9, 0xda, + 0x54, 0x8c, 0x32, 0x52, 0xe3, 0x28, 0x46, 0x54, 0xff, 0xab, 0x81, 0xf2, 0x51, 0x8f, 0xb6, 0x05, + 0xf5, 0x5c, 0xf8, 0x07, 0x50, 0x0e, 0xf3, 0x6e, 0x63, 0x81, 0xa5, 0xb3, 0x95, 0x83, 0x0f, 0xcd, + 0x71, 0x4d, 0xc4, 0x69, 0x30, 0xfd, 0xcb, 0x4e, 0x38, 0xc0, 0xcd, 0x10, 0x6d, 0xf6, 0xee, 0x9a, + 0x67, 0xad, 0x2f, 0x49, 0x5b, 0x9c, 0x12, 0x81, 0xc7, 0xe1, 0x8d, 0xc7, 0x50, 0xac, 0x0a, 0x1d, + 0xb0, 0x66, 0x13, 0x87, 0x08, 0x72, 0xe6, 0x87, 0x1e, 0xb9, 0x8c, 0x70, 0xe5, 0xe0, 0xde, 0xab, + 0xb9, 0xa9, 0x4f, 0x52, 0xad, 0xad, 0xe1, 0xc0, 0x58, 0x4b, 0x0c, 0xa1, 0xa4, 0x78, 0xf5, 0x6b, + 0x0d, 0xec, 0x1d, 0x37, 0x1f, 0x32, 0x2f, 0xf0, 0x9b, 0x22, 0x5c, 0xa7, 0x4e, 0x5f, 0x99, 0xe0, + 0x4f, 0xc0, 0x3c, 0x0b, 0x1c, 0xa2, 0x72, 0xfa, 0xae, 0x0a, 0x7a, 0x1e, 0x05, 0x0e, 0x79, 0x39, + 0x30, 0xb6, 0x53, 0xac, 0x27, 0x7d, 0x9f, 0x20, 0x49, 0x80, 0x9f, 0x83, 0x45, 0x86, 0xdd, 0x0e, + 0x09, 0x43, 0x2f, 0xdd, 0x59, 0x39, 0xa8, 0x9a, 0x85, 0xbb, 0xc6, 0x3c, 0xa9, 0xa3, 0x10, 0x3a, + 0x5e, 0x71, 0xf9, 0xca, 0x91, 0x52, 0xa8, 0x9e, 0x82, 0x35, 0xb9, 0xd4, 0x1e, 0x13, 0xd2, 0x02, + 0x6f, 0x81, 0x52, 0x97, 0xba, 0x32, 0xa8, 0x05, 0x6b, 0x45, 0xb1, 0x4a, 0xa7, 0xd4, 0x45, 0xe1, + 0xb8, 0x34, 0xe3, 0x2b, 0x99, 0xb3, 0x49, 0x33, 0xbe, 0x42, 0xe1, 0x78, 0xf5, 0x21, 0x58, 0x52, + 0x1e, 0x27, 0x85, 0x4a, 0xd3, 0x85, 0x4a, 0x39, 0x42, 0x7f, 0xbf, 0x01, 0xb6, 0x1b, 0x9e, 0x5d, + 0xa7, 0x9c, 0x05, 0x32, 0x5f, 0x56, 0x60, 0x77, 0x88, 0x78, 0x0b, 0xf5, 0xf1, 0x04, 0xcc, 0x73, + 0x9f, 
0xb4, 0x55, 0x59, 0x1c, 0x4c, 0xc9, 0x6d, 0x4e, 0x7c, 0x4d, 0x9f, 0xb4, 0xad, 0xd5, 0x68, + 0x29, 0xc3, 0x37, 0x24, 0xd5, 0xe0, 0x33, 0xb0, 0xc8, 0x05, 0x16, 0x01, 0xd7, 0x4b, 0x52, 0xf7, + 0xa3, 0x6b, 0xea, 0x4a, 0xee, 0x78, 0x15, 0x47, 0xef, 0x48, 0x69, 0x56, 0xff, 0xad, 0x81, 0x1f, + 0xe4, 0xb0, 0x1e, 0x53, 0x2e, 0xe0, 0xb3, 0x4c, 0xc6, 0xcc, 0x57, 0xcb, 0x58, 0xc8, 0x96, 0xf9, + 0x8a, 0x37, 0x6f, 0x34, 0x32, 0x91, 0xad, 0x26, 0x58, 0xa0, 0x82, 0x74, 0xa3, 0x52, 0x34, 0xaf, + 0x37, 0x2d, 0x6b, 0x4d, 0x49, 0x2f, 0x9c, 0x84, 0x22, 0x68, 0xa4, 0x55, 0xfd, 0xcf, 0x8d, 0xdc, + 0xe9, 0x84, 0xe9, 0x84, 0xe7, 0x60, 0xb5, 0x4b, 0xdd, 0xc3, 0x1e, 0xa6, 0x0e, 0x6e, 0xa9, 0xdd, + 0x33, 0xad, 0x08, 0xc2, 0x5e, 0x69, 0x8e, 0x7a, 0xa5, 0x79, 0xe2, 0x8a, 0x33, 0xd6, 0x14, 0x8c, + 0xba, 0x1d, 0x6b, 0x73, 0x38, 0x30, 0x56, 0x4f, 0x27, 0x94, 0x50, 0x42, 0x17, 0xfe, 0x0e, 0x94, + 0x39, 0x71, 0x48, 0x5b, 0x78, 0xec, 0x7a, 0x1d, 0xe2, 0x31, 0x6e, 0x11, 0xa7, 0xa9, 0xa8, 0xd6, + 0x6a, 0x98, 0xb7, 0xe8, 0x0d, 0xc5, 0x92, 0xd0, 0x01, 0xeb, 0x5d, 0x7c, 0xf5, 0xd4, 0xc5, 0xf1, + 0x44, 0x4a, 0xaf, 0x39, 0x11, 0x38, 0x1c, 0x18, 0xeb, 0xa7, 0x09, 0x2d, 0x94, 0xd2, 0xae, 0x7e, + 0x37, 0x0f, 0x6e, 0x16, 0x56, 0x15, 0xfc, 0x1c, 0x40, 0xaf, 0xc5, 0x09, 0xeb, 0x11, 0xfb, 0xe1, + 0xe8, 0x34, 0xa1, 0x5e, 0xb4, 0x71, 0xf7, 0xd5, 0x02, 0xc1, 0xb3, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, + 0x9f, 0x35, 0xb0, 0x66, 0x8f, 0xdc, 0x10, 0xbb, 0xe1, 0xd9, 0x51, 0x61, 0x3c, 0x7c, 0x9d, 0x7a, + 0x37, 0xeb, 0x93, 0x4a, 0x47, 0xae, 0x60, 0x7d, 0x6b, 0x57, 0x05, 0xb4, 0x96, 0xb0, 0xa1, 0xa4, + 0x53, 0x78, 0x0a, 0xa0, 0x1d, 0x4b, 0x72, 0x75, 0xa6, 0xc9, 0x14, 0x2f, 0x58, 0xb7, 0x94, 0xc2, + 0x6e, 0xc2, 0x6f, 0x04, 0x42, 0x39, 0x44, 0xf8, 0x0b, 0xb0, 0xde, 0x0e, 0x18, 0x23, 0xae, 0x78, + 0x44, 0xb0, 0x23, 0x2e, 0xfa, 0xfa, 0xbc, 0x94, 0xda, 0x53, 0x52, 0xeb, 0x0f, 0x12, 0x56, 0x94, + 0x42, 0x87, 0x7c, 0x9b, 0x70, 0xca, 0x88, 0x1d, 0xf1, 0x17, 0x92, 0xfc, 0x7a, 0xc2, 0x8a, 0x52, + 0x68, 0x78, 0x1f, 0xac, 0x92, 0x2b, 0x9f, 0xb4, 0xa3, 0x9c, 0x2e, 0x4a, 0xf6, 0x8e, 0x62, 0xaf, + 0x1e, 0x4d, 0xd8, 0x50, 0x02, 0xb9, 0xef, 0x00, 0x98, 0x4d, 0x22, 0xdc, 0x04, 0xa5, 0x4b, 0xd2, + 0x1f, 0x9d, 0x3c, 0x28, 0x7c, 0x84, 0x9f, 0x81, 0x85, 0x1e, 0x76, 0x02, 0xa2, 0x6a, 0xfd, 0xbd, + 0x57, 0xab, 0xf5, 0x27, 0xb4, 0x4b, 0xd0, 0x88, 0xf8, 0xd3, 0x1b, 0xf7, 0xb5, 0xea, 0xbf, 0x34, + 0xb0, 0xd5, 0xf0, 0xec, 0x26, 0x69, 0x07, 0x8c, 0x8a, 0x7e, 0x43, 0xae, 0xf3, 0x5b, 0xe8, 0xd9, + 0x28, 0xd1, 0xb3, 0x3f, 0x9c, 0x5e, 0x6b, 0xc9, 0xe8, 0x8a, 0x3a, 0x76, 0xf5, 0xb9, 0x06, 0x76, + 0x33, 0xe8, 0xb7, 0xd0, 0x51, 0x7f, 0x95, 0xec, 0xa8, 0xef, 0x5f, 0x67, 0x32, 0x05, 0xfd, 0xf4, + 0xbb, 0x8d, 0x9c, 0xa9, 0xc8, 0x6e, 0x1a, 0xde, 0xee, 0x18, 0xed, 0x51, 0x87, 0x74, 0x88, 0x2d, + 0x27, 0x53, 0x9e, 0xb8, 0xdd, 0xc5, 0x16, 0x34, 0x81, 0x82, 0x1c, 0xec, 0xd9, 0xe4, 0x1c, 0x07, + 0x8e, 0x38, 0xb4, 0xed, 0x07, 0xd8, 0xc7, 0x2d, 0xea, 0x50, 0x41, 0xd5, 0x75, 0x64, 0xd9, 0xfa, + 0x74, 0x38, 0x30, 0xf6, 0xea, 0xb9, 0x88, 0x97, 0x03, 0xe3, 0x56, 0xf6, 0x5e, 0x6e, 0xc6, 0x90, + 0x3e, 0x2a, 0x90, 0x86, 0x7d, 0xa0, 0x33, 0xf2, 0xc7, 0x20, 0xdc, 0x14, 0x75, 0xe6, 0xf9, 0x09, + 0xb7, 0x25, 0xe9, 0xf6, 0xe7, 0xc3, 0x81, 0xa1, 0xa3, 0x02, 0xcc, 0x6c, 0xc7, 0x85, 0xf2, 0xf0, + 0x4b, 0xb0, 0x8d, 0x47, 0x7d, 0x20, 0xe1, 0x75, 0x5e, 0x7a, 0xbd, 0x3f, 0x1c, 0x18, 0xdb, 0x87, + 0x59, 0xf3, 0x6c, 0x87, 0x79, 0xa2, 0xb0, 0x06, 0x96, 0x7a, 0xf2, 0xca, 0xce, 0xf5, 0x05, 0xa9, + 0xbf, 0x3b, 0x1c, 0x18, 0x4b, 0xa3, 0x5b, 0x7c, 0xa8, 0xb9, 0x78, 0xdc, 0x94, 0x17, 0xc1, 0x08, + 0x05, 0x3f, 0x06, 0x2b, 0x17, 
0x1e, 0x17, 0xbf, 0x24, 0xe2, 0x2b, 0x8f, 0x5d, 0xca, 0xc6, 0x50, + 0xb6, 0xb6, 0xd5, 0x0a, 0xae, 0x3c, 0x1a, 0x9b, 0xd0, 0x24, 0x0e, 0xfe, 0x06, 0x2c, 0x5f, 0xa8, + 0x6b, 0x1f, 0xd7, 0x97, 0x64, 0xa1, 0xdd, 0x99, 0x52, 0x68, 0x89, 0x2b, 0xa2, 0xb5, 0xa5, 0xe4, + 0x97, 0xa3, 0x61, 0x8e, 0xc6, 0x6a, 0xf0, 0xc7, 0x60, 0x49, 0xbe, 0x9c, 0xd4, 0xf5, 0xb2, 0x8c, + 0x66, 0x43, 0xc1, 0x97, 0x1e, 0x8d, 0x86, 0x51, 0x64, 0x8f, 0xa0, 0x27, 0x8d, 0x07, 0xfa, 0x72, + 0x16, 0x7a, 0xd2, 0x78, 0x80, 0x22, 0x3b, 0x7c, 0x06, 0x96, 0x38, 0x79, 0x4c, 0xdd, 0xe0, 0x4a, + 0x07, 0x72, 0xcb, 0xdd, 0x9d, 0x12, 0x6e, 0xf3, 0x48, 0x22, 0x53, 0x17, 0xee, 0xb1, 0xba, 0xb2, + 0xa3, 0x48, 0x12, 0xda, 0x60, 0x99, 0x05, 0xee, 0x21, 0x7f, 0xca, 0x09, 0xd3, 0x57, 0x32, 0xa7, + 0x7d, 0x5a, 0x1f, 0x45, 0xd8, 0xb4, 0x87, 0x38, 0x33, 0x31, 0x02, 0x8d, 0x85, 0xe1, 0x5f, 0x34, + 0x00, 0x79, 0xe0, 0xfb, 0x0e, 0xe9, 0x12, 0x57, 0x60, 0x47, 0xde, 0xef, 0xb9, 0xbe, 0x2a, 0xfd, + 0xfd, 0x6c, 0xda, 0x7c, 0x32, 0xa4, 0xb4, 0xe3, 0xf8, 0x98, 0xce, 0x42, 0x51, 0x8e, 0xcf, 0x30, + 0x9d, 0xe7, 0x5c, 0x3e, 0xeb, 0x6b, 0x33, 0xd3, 0x99, 0xff, 0xfd, 0x32, 0x4e, 0xa7, 0xb2, 0xa3, + 0x48, 0x12, 0x7e, 0x01, 0xf6, 0xa2, 0xaf, 0x3b, 0xe4, 0x79, 0xe2, 0x98, 0x3a, 0x84, 0xf7, 0xb9, + 0x20, 0x5d, 0x7d, 0x5d, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, + 0x18, 0x51, 0x7b, 0x08, 0xf7, 0x4e, 0xdc, 0x9f, 0x8e, 0x78, 0x1b, 0x3b, 0xa3, 0x5b, 0xcb, 0x86, + 0x74, 0xf0, 0xee, 0x70, 0x60, 0x18, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x1a, 0xe8, 0xb8, + 0xc8, 0xcf, 0xa6, 0xf4, 0xf3, 0xc3, 0xb0, 0xe7, 0x14, 0x3a, 0x28, 0x64, 0x43, 0x1f, 0x6c, 0xe2, + 0xe4, 0x77, 0x36, 0xd7, 0xb7, 0xe4, 0x2e, 0x7c, 0x6f, 0xca, 0x3a, 0xa4, 0x3e, 0xcd, 0x2d, 0x5d, + 0xa5, 0x71, 0x33, 0x65, 0xe0, 0x28, 0xa3, 0x0e, 0xaf, 0x00, 0xc4, 0xe9, 0xdf, 0x02, 0x5c, 0x87, + 0x33, 0x8f, 0x98, 0xcc, 0xbf, 0x84, 0x71, 0xa9, 0x65, 0x4c, 0x1c, 0xe5, 0xf8, 0x80, 0x8f, 0xc1, + 0x8e, 0x1a, 0x7d, 0xea, 0x72, 0x7c, 0x4e, 0x9a, 0x7d, 0xde, 0x16, 0x0e, 0xd7, 0xb7, 0x65, 0x7f, + 0xd3, 0x87, 0x03, 0x63, 0xe7, 0x30, 0xc7, 0x8e, 0x72, 0x59, 0xf0, 0x33, 0xb0, 0x79, 0xee, 0xb1, + 0x16, 0xb5, 0x6d, 0xe2, 0x46, 0x4a, 0x3b, 0x52, 0x69, 0x27, 0xcc, 0xc4, 0x71, 0xca, 0x86, 0x32, + 0x68, 0xc8, 0xc1, 0xae, 0x52, 0x6e, 0x30, 0xaf, 0x7d, 0xea, 0x05, 0xae, 0x08, 0x5b, 0x2a, 0xd7, + 0x77, 0xe3, 0x63, 0x64, 0xf7, 0x30, 0x0f, 0xf0, 0x72, 0x60, 0xdc, 0xce, 0x69, 0xe9, 0x09, 0x10, + 0xca, 0xd7, 0x86, 0x36, 0x00, 0xb2, 0x0f, 0x8c, 0xb6, 0xdc, 0xde, 0xcc, 0x4f, 0x40, 0x14, 0x83, + 0xd3, 0xbb, 0x6e, 0x3d, 0x3c, 0x99, 0xc7, 0x66, 0x34, 0xa1, 0x5b, 0xfd, 0x9b, 0x06, 0x6e, 0x16, + 0x32, 0xe1, 0x27, 0x89, 0xff, 0x0d, 0xd5, 0xd4, 0xff, 0x06, 0x98, 0x25, 0xbe, 0x81, 0xdf, 0x0d, + 0x5f, 0x6b, 0x40, 0x2f, 0xea, 0x9e, 0xf0, 0xe3, 0x44, 0x80, 0xef, 0xa4, 0x02, 0xdc, 0xca, 0xf0, + 0xde, 0x40, 0x7c, 0xff, 0xd0, 0xc0, 0x5e, 0xfe, 0xe9, 0x01, 0xef, 0x25, 0xa2, 0x33, 0x52, 0xd1, + 0x6d, 0xa4, 0x58, 0x2a, 0xb6, 0xdf, 0x83, 0x75, 0x75, 0xc6, 0x24, 0xff, 0x36, 0x25, 0x62, 0x0c, + 0x2b, 0x29, 0xbc, 0x1e, 0x2a, 0x89, 0x68, 0xa5, 0xe5, 0x87, 0x5d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa, + 0xff, 0xd4, 0xc0, 0x3b, 0x33, 0x4f, 0x07, 0x68, 0x25, 0x42, 0x37, 0x53, 0xa1, 0x57, 0x8a, 0x05, + 0xde, 0xcc, 0x4f, 0x27, 0xeb, 0x83, 0xe7, 0x2f, 0x2a, 0x73, 0xdf, 0xbc, 0xa8, 0xcc, 0x7d, 0xfb, + 0xa2, 0x32, 0xf7, 0xa7, 0x61, 0x45, 0x7b, 0x3e, 0xac, 0x68, 0xdf, 0x0c, 0x2b, 0xda, 0xb7, 0xc3, + 0x8a, 0xf6, 0xbf, 0x61, 0x45, 0xfb, 0xeb, 0xff, 0x2b, 0x73, 0xbf, 0x5d, 0x52, 0x72, 0xdf, 0x07, + 0x00, 0x00, 0xff, 0xff, 0x15, 0x2e, 0xf4, 0x72, 0x59, 
0x16, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index aa37d948f5207..e9df3c16fe4c3 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -42,7 +42,7 @@ message AllowedHostPath { // pathPrefix is the path prefix that the host volume must match. // It does not support `*`. // Trailing slashes are trimmed when validating the path prefix with a host path. - // + // // Examples: // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` // `/foo` would not allow `/food` or `/etc/foo` @@ -58,9 +58,11 @@ message AllowedHostPath { // created by POSTing to .../pods//evictions. message Eviction { // ObjectMeta describes the pod that is being evicted. + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } @@ -97,17 +99,21 @@ message IDRange { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods message PodDisruptionBudget { + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. + // +optional optional PodDisruptionBudgetSpec spec = 2; // Most recently observed status of the PodDisruptionBudget. + // +optional optional PodDisruptionBudgetStatus status = 3; } // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. message PodDisruptionBudgetList { + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated PodDisruptionBudget items = 2; @@ -119,16 +125,19 @@ message PodDisruptionBudgetSpec { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". + // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". + // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; } @@ -242,6 +251,12 @@ message PodSecurityPolicySpec { // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. optional RunAsUserStrategyOptions runAsUser = 11; + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + optional RunAsGroupStrategyOptions runAsGroup = 22; + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. optional SupplementalGroupsStrategyOptions supplementalGroups = 12; @@ -281,7 +296,7 @@ message PodSecurityPolicySpec { // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. 
Single * means all unsafe sysctls are allowed. // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // + // // Examples: // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. @@ -291,7 +306,7 @@ message PodSecurityPolicySpec { // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // + // // Examples: // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. @@ -305,6 +320,17 @@ message PodSecurityPolicySpec { repeated string allowedProcMountTypes = 21; } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +message RunAsGroupStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. message RunAsUserStrategyOptions { // rule is the strategy that will dictate the allowable RunAsUser values that may be set. diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index c1a2727509a0c..91ea1185878a5 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -28,16 +28,19 @@ type PodDisruptionBudgetSpec struct { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". + // +optional MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` // Label query over pods whose evictions are managed by the disruption // budget. + // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". + // +optional MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` } @@ -81,12 +84,15 @@ type PodDisruptionBudgetStatus struct { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the PodDisruptionBudget. + // +optional Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the PodDisruptionBudget. 
+ // +optional Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -95,6 +101,7 @@ type PodDisruptionBudget struct { // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { metav1.TypeMeta `json:",inline"` + // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -110,9 +117,11 @@ type Eviction struct { metav1.TypeMeta `json:",inline"` // ObjectMeta describes the pod that is being evicted. + // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // DeleteOptions may be provided + // +optional DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` } @@ -174,6 +183,11 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -245,6 +259,10 @@ type AllowedHostPath struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` } +// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities +// field and means that any capabilities are allowed to be requested. +var AllowAllCapabilities v1.Capability = "*" + // FSType gives strong typing to different file systems that are used by volumes. type FSType string @@ -268,8 +286,15 @@ var ( DownwardAPI FSType = "downwardAPI" FC FSType = "fc" ConfigMap FSType = "configMap" + VsphereVolume FSType = "vsphereVolume" Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" + PhotonPersistentDisk FSType = "photonPersistentDisk" + StorageOS FSType = "storageos" + Projected FSType = "projected" + PortworxVolume FSType = "portworxVolume" + ScaleIO FSType = "scaleIO" + CSI FSType = "csi" All FSType = "*" ) @@ -319,6 +344,16 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsGroupStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. 
+ // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + // IDRange provides a min/max of an allowed range of IDs. type IDRange struct { // min is the start of the range, inclusive. @@ -340,6 +375,20 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) +// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a +// Security Context. +type RunAsGroupStrategy string + +const ( + // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when RunAsGroup are specified, they have to fall in the defined range. + RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" + // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. + RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" + // RunAsUserStrategyRunAsAny means that container may make requests for any gid. + RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" +) + // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. type FSGroupStrategyOptions struct { // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. @@ -356,6 +405,9 @@ type FSGroupStrategyOptions struct { type FSGroupStrategyType string const ( + // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. + // However, when FSGroups are specified, they have to fall in the defined range. + FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. @@ -378,6 +430,9 @@ type SupplementalGroupsStrategyOptions struct { type SupplementalGroupsStrategyType string const ( + // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when gids are specified, they have to fall in the defined range. + SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index df10b2a29b8b8..547ef18ea410c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -162,6 +162,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. 
This field requires the RunAsGroup feature gate to be enabled.", "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", @@ -178,6 +179,16 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { return map_PodSecurityPolicySpec } +var map_RunAsGroupStrategyOptions = map[string]string{ + "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", + "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsGroupStrategyOptions +} + var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 9af268a438202..1a02ae6007016 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -348,6 +348,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -398,6 +403,27 @@ func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. +func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
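Editor's note: the policy/v1beta1 hunks above introduce the RunAsGroup strategy (new proto field 22, the RunAsGroupStrategyOptions type and RunAsGroupStrategy constants, their swagger docs, and deepcopy support). The standalone Go sketch below is not part of the vendored diff; it only illustrates how a client might populate the new field. The policy name and the single-gid range are illustrative assumptions, and per the field's own comment the setting only takes effect when the RunAsGroup feature gate is enabled.

// Editor's sketch (not part of the vendored diff): populating the new
// RunAsGroup field on a PodSecurityPolicy. Name and gid range are
// illustrative assumptions.
package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	psp := policyv1beta1.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "restricted-gid"}, // hypothetical policy name
		Spec: policyv1beta1.PodSecurityPolicySpec{
			SELinux:   policyv1beta1.SELinuxStrategyOptions{Rule: policyv1beta1.SELinuxStrategyRunAsAny},
			RunAsUser: policyv1beta1.RunAsUserStrategyOptions{Rule: policyv1beta1.RunAsUserStrategyRunAsAny},
			// New in this diff: constrain the pod's primary gid to an allowed range.
			// A single-entry range with Min == Max forces one specific gid.
			RunAsGroup: &policyv1beta1.RunAsGroupStrategyOptions{
				Rule:   policyv1beta1.RunAsGroupStrategyMustRunAs,
				Ranges: []policyv1beta1.IDRange{{Min: 1000, Max: 1000}},
			},
			SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny},
			FSGroup:            policyv1beta1.FSGroupStrategyOptions{Rule: policyv1beta1.FSGroupStrategyRunAsAny},
		},
	}
	fmt.Println(psp.Spec.RunAsGroup.Rule) // MustRunAs
}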
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 28ceb269b4e7f..76899ef0965f4 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1 // import "k8s.io/api/rbac/v1" diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index 21010fbeeaf30..708db3276ede4 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -641,24 +640,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 0ec20c88e79c6..83ce310e6fa36 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go index 5236a477f0084..f2547a58f7779 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1alpha1 // import "k8s.io/api/rbac/v1alpha1" diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 71eced8d4ec89..e035b331fa5e0 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. @@ -641,24 +640,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index 1d6ef30b0eb74..d7b194ae40740 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go index 4b77c9c6b834e..516625eeeae26 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1beta1 // import "k8s.io/api/rbac/v1beta1" diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 71e5799e3e639..904a6e7a2dc2a 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -641,24 +640,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index 66dba6ca13ebd..c80327593d789 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go index e10d07ff742a7..05a454a529ca6 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io + package v1alpha1 // import "k8s.io/api/scheduling/v1alpha1" diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 97c07c98413b5..0a0d481a287cf 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
@@ -141,24 +140,6 @@ func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/vendor/k8s.io/api/scheduling/v1beta1/doc.go index f2dd1cfac7057..7cf1af2124c7d 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io + package v1beta1 // import "k8s.io/api/scheduling/v1beta1" diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index ea8f8d5e6e38a..ddb285446b7f6 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -141,24 +140,6 @@ func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go index 05a62c569ed14..9126211d645f0 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=settings.k8s.io + package v1alpha1 // import "k8s.io/api/settings/v1alpha1" diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go index 15285bae50833..c842131057a09 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
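Editor's note: the generated.pb.go hunks above repeatedly delete the encodeFixed64Generated/encodeFixed32Generated helpers (apparently unused by these messages) while keeping encodeVarintGenerated. The standalone sketch below is not part of the vendored diff; it restates the base-128 varint scheme that the retained helper implements, seven payload bits per byte with the high bit marking continuation, and works through one arbitrary value.

// Editor's sketch (not part of the vendored diff): the varint encoding used
// by encodeVarintGenerated. The value 300 is an arbitrary example.
package main

import "fmt"

func putVarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80) // low 7 bits, continuation bit set
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v) // final byte, continuation bit clear
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := putVarint(buf, 0, 300)
	// 300 = 0b1_0010_1100 -> low 7 bits 0x2c with the high bit set (0xac), then 0x02.
	fmt.Printf("% x\n", buf[:n]) // prints "ac 02"
}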
// source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. @@ -216,24 +215,6 @@ func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index 8f4a4045c43b1..ff8bb34ca1c8a 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1 diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index d43a982981d67..e4b29311b68a9 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -27,6 +26,12 @@ limitations under the License. 
It has these top-level messages: StorageClass StorageClassList + VolumeAttachment + VolumeAttachmentList + VolumeAttachmentSource + VolumeAttachmentSpec + VolumeAttachmentStatus + VolumeError */ package v1 @@ -62,9 +67,39 @@ func (m *StorageClassList) Reset() { *m = StorageClassList{} func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError") } func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -205,24 +240,242 @@ func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l 
int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PersistentVolumeName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i += copy(dAtA[i:], *m.PersistentVolumeName) + } + return i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i += copy(dAtA[i:], m.Attacher) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n7, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + return i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for _, k := range keysForAttachmentMetadata { + dAtA[i] = 0x12 + i++ + v := m.AttachmentMetadata[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = 
encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.AttachError != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) + n8, err := m.AttachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.DetachError != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) + n9, err := m.DetachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) + n10, err := m.Time.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -287,6 +540,87 @@ func (m *StorageClassList) Size() (n int) { return n } +func (m *VolumeAttachment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func sovGenerated(x uint64) (n int) { for { n++ @@ -331,22 +665,911 @@ func (this *StorageClassList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), 
"ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *StorageClass) Unmarshal(dAtA []byte) error { +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -369,17 +1592,17 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -389,27 +1612,26 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Attacher = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -419,26 +1641,27 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Provisioner = string(dAtA[iNdEx:postIndex]) + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -448,19 +1671,76 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -470,12 +1750,17 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + m.Attached = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -485,26 +1770,26 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Parameters == nil { - m.Parameters = make(map[string]string) + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -514,77 +1799,86 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > 
l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Parameters[mapkey] = mapvalue - } else { - var mapvalue string - m.Parameters[mapkey] = mapvalue - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) - m.ReclaimPolicy = &s + m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -594,75 +1888,28 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - b := bool(v != 0) - m.AllowVolumeExpansion = &b - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + if m.AttachError == nil { + m.AttachError = &VolumeError{} } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - s := VolumeBindingMode(dAtA[iNdEx:postIndex]) - m.VolumeBindingMode = &s iNdEx = postIndex - case 8: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -686,8 +1933,10 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) - if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -712,7 +1961,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *StorageClassList) Unmarshal(dAtA []byte) error { +func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -735,15 +1984,15 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -767,15 +2016,15 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -785,22 +2034,20 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { 
return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, StorageClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -933,46 +2180,67 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 656 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0x8e, 0x93, 0x9b, 0xde, 0x74, 0xd2, 0xea, 0x26, 0xbe, 0xbd, 0x92, 0x6f, 0x16, 0x4e, 0x54, - 0x36, 0x11, 0x12, 0xe3, 0xa6, 0x14, 0x54, 0x21, 0x81, 0x54, 0xa3, 0x4a, 0x20, 0xb5, 0x6a, 0xe4, - 0x56, 0x15, 0x42, 0x2c, 0x98, 0x38, 0x07, 0x77, 0x88, 0xed, 0x31, 0x33, 0x63, 0x43, 0x76, 0xbc, - 0x00, 0x12, 0xcf, 0xc3, 0x13, 0x74, 0xd9, 0x65, 0x57, 0x16, 0x35, 0x6f, 0xd1, 0x15, 0xf2, 0x0f, - 0x8d, 0x9b, 0x04, 0xd1, 0xdd, 0xcc, 0x77, 0xbe, 0xef, 0x3b, 0x33, 0xe7, 0x07, 0x3d, 0x9b, 0xec, - 0x0a, 0x4c, 0x99, 0x31, 0x09, 0x47, 0xc0, 0x7d, 0x90, 0x20, 0x8c, 0x08, 0xfc, 0x31, 0xe3, 0x46, - 0x11, 0x20, 0x01, 0x35, 0x84, 0x64, 0x9c, 0x38, 0x60, 0x44, 0x03, 0xc3, 0x01, 0x1f, 0x38, 0x91, - 0x30, 0xc6, 0x01, 0x67, 0x92, 0xa9, 0xff, 0xe5, 0x34, 0x4c, 0x02, 0x8a, 0x0b, 0x1a, 0x8e, 0x06, - 0x9d, 0x07, 0x0e, 0x95, 0x67, 0xe1, 0x08, 0xdb, 0xcc, 0x33, 0x1c, 0xe6, 0x30, 0x23, 0x63, 0x8f, - 0xc2, 0x77, 0xd9, 0x2d, 0xbb, 0x64, 0xa7, 0xdc, 0xa5, 0xb3, 0x59, 0x4a, 0x66, 0x33, 0xbe, 0x2c, - 0x53, 0x67, 0x67, 0xc6, 0xf1, 0x88, 0x7d, 0x46, 0x7d, 0xe0, 0x53, 0x23, 0x98, 0x38, 0x29, 0x20, - 0x0c, 0x0f, 0x24, 0x59, 0xa6, 0x32, 0x7e, 0xa7, 0xe2, 0xa1, 0x2f, 0xa9, 0x07, 0x0b, 0x82, 0xc7, - 0x7f, 0x12, 0x08, 0xfb, 0x0c, 0x3c, 0x32, 0xaf, 0xdb, 0xfc, 0xb2, 0x82, 0xd6, 0x8e, 0xf3, 0x02, - 0x3c, 0x77, 0x89, 0x10, 0xea, 0x5b, 0xd4, 0x48, 0x1f, 0x35, 0x26, 0x92, 0x68, 0x4a, 0x4f, 0xe9, - 0x37, 0xb7, 0xb7, 0xf0, 0xac, 0x58, 0x37, 0xde, 0x38, 0x98, 0x38, 0x29, 0x20, 0x70, 0xca, 0xc6, - 0xd1, 0x00, 0x1f, 0x8d, 0xde, 0x83, 0x2d, 0x0f, 0x41, 0x12, 0x53, 0x3d, 0x8f, 0xbb, 0x95, 0x24, - 0xee, 0xa2, 0x19, 0x66, 0xdd, 0xb8, 0xaa, 0x8f, 0x50, 0x33, 0xe0, 0x2c, 0xa2, 0x82, 0x32, 0x1f, - 0xb8, 0x56, 0xed, 0x29, 0xfd, 0x55, 0xf3, 0xdf, 0x42, 0xd2, 0x1c, 0xce, 0x42, 0x56, 0x99, 0xa7, - 0x3a, 0x08, 0x05, 0x84, 0x13, 0x0f, 0x24, 0x70, 0xa1, 0xd5, 0x7a, 0xb5, 0x7e, 0x73, 0xfb, 0x21, - 0x5e, 0xda, 0x47, 0x5c, 0xfe, 0x11, 0x1e, 0xde, 0xa8, 0xf6, 0x7d, 0xc9, 0xa7, 0xb3, 0xd7, 0xcd, - 0x02, 0x56, 0xc9, 0x5a, 0x9d, 0xa0, 0x75, 0x0e, 0xb6, 0x4b, 0xa8, 0x37, 0x64, 0x2e, 0xb5, 0xa7, - 0xda, 0x5f, 0xd9, 0x0b, 0xf7, 0x93, 0xb8, 0xbb, 0x6e, 0x95, 0x03, 0xd7, 0x71, 0x77, 0x6b, 0x71, - 0x02, 0xf0, 0x10, 0xb8, 0xa0, 0x42, 0x82, 0x2f, 0x4f, 0x99, 0x1b, 0x7a, 0x70, 0x4b, 0x63, 0xdd, - 0xf6, 0x56, 0x77, 0xd0, 0x9a, 0xc7, 0x42, 0x5f, 0x1e, 0x05, 0x92, 0x32, 0x5f, 0x68, 0xf5, 0x5e, - 0xad, 0xbf, 0x6a, 0xb6, 0x92, 0xb8, 0xbb, 0x76, 0x58, 0xc2, 0xad, 0x5b, 0x2c, 0xf5, 0x00, 0x6d, - 0x10, 0xd7, 0x65, 0x1f, 0xf3, 0x04, 0xfb, 0x9f, 0x02, 0xe2, 0xa7, 0x55, 0xd2, 0x56, 0x7a, 0x4a, - 0xbf, 0x61, 0x6a, 0x49, 0xdc, 0xdd, 0xd8, 0x5b, 0x12, 0xb7, 0x96, 0xaa, 0xd4, 0x57, 0xa8, 0x1d, - 0x65, 0x90, 0x49, 0xfd, 0x31, 0xf5, 0x9d, 0x43, 0x36, 0x06, 0xed, 0xef, 0xec, 0xd3, 0xf7, 0x93, - 0xb8, 0xdb, 0x3e, 0x9d, 0x0f, 0x5e, 0x2f, 0x03, 0xad, 0x45, 0x13, 0xf5, 0x03, 0x6a, 0x67, 0x19, - 0x61, 0x7c, 0xc2, 0x02, 0xe6, 0x32, 0x87, 0x82, 0xd0, 0x1a, 0x59, 0xeb, 0xfa, 0xe5, 0xd6, 0xa5, - 0xa5, 
0x4b, 0xfb, 0x56, 0xb0, 0xa6, 0xc7, 0xe0, 0x82, 0x2d, 0x19, 0x3f, 0x01, 0xee, 0x99, 0xff, - 0x17, 0xfd, 0x6a, 0xef, 0xcd, 0x5b, 0x59, 0x8b, 0xee, 0x9d, 0xa7, 0xe8, 0x9f, 0xb9, 0x86, 0xab, - 0x2d, 0x54, 0x9b, 0xc0, 0x34, 0x9b, 0xe6, 0x55, 0x2b, 0x3d, 0xaa, 0x1b, 0xa8, 0x1e, 0x11, 0x37, - 0x84, 0x7c, 0xf8, 0xac, 0xfc, 0xf2, 0xa4, 0xba, 0xab, 0x6c, 0x7e, 0x53, 0x50, 0xab, 0x3c, 0x3d, - 0x07, 0x54, 0x48, 0xf5, 0xcd, 0xc2, 0x4e, 0xe0, 0xbb, 0xed, 0x44, 0xaa, 0xce, 0x36, 0xa2, 0x55, - 0xfc, 0xa1, 0xf1, 0x0b, 0x29, 0xed, 0xc3, 0x0b, 0x54, 0xa7, 0x12, 0x3c, 0xa1, 0x55, 0xb3, 0xc2, - 0xdc, 0xbb, 0xc3, 0x4c, 0x9b, 0xeb, 0x85, 0x5f, 0xfd, 0x65, 0xaa, 0xb4, 0x72, 0x03, 0xb3, 0x7f, - 0x7e, 0xa5, 0x57, 0x2e, 0xae, 0xf4, 0xca, 0xe5, 0x95, 0x5e, 0xf9, 0x9c, 0xe8, 0xca, 0x79, 0xa2, - 0x2b, 0x17, 0x89, 0xae, 0x5c, 0x26, 0xba, 0xf2, 0x3d, 0xd1, 0x95, 0xaf, 0x3f, 0xf4, 0xca, 0xeb, - 0x6a, 0x34, 0xf8, 0x19, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x64, 0x41, 0x83, 0x40, 0x05, 0x00, 0x00, + // 984 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x23, 0x45, + 0x18, 0xce, 0xc6, 0xf9, 0x70, 0xc6, 0x09, 0x97, 0x0c, 0x01, 0x8c, 0x0b, 0x3b, 0x32, 0x05, 0xe6, + 0xe0, 0x76, 0x2f, 0xe1, 0x40, 0x27, 0x24, 0x90, 0xbc, 0x60, 0x09, 0xa4, 0xf8, 0x2e, 0x9a, 0x84, + 0x13, 0x42, 0x14, 0x4c, 0x76, 0xdf, 0xdb, 0x2c, 0xf6, 0xee, 0x2c, 0x33, 0x63, 0x43, 0x3a, 0x2a, + 0x3a, 0x24, 0x68, 0xf9, 0x29, 0x94, 0x54, 0xa1, 0xbb, 0xf2, 0x2a, 0x8b, 0x2c, 0x35, 0x7f, 0x20, + 0x15, 0x9a, 0xd9, 0x89, 0xbd, 0xb1, 0xd7, 0x9c, 0xd3, 0x5c, 0xe7, 0xf7, 0xe3, 0x79, 0xde, 0xef, + 0x59, 0xa3, 0x4f, 0x7a, 0x0f, 0x85, 0x1d, 0x32, 0xa7, 0x37, 0x38, 0x05, 0x1e, 0x83, 0x04, 0xe1, + 0x0c, 0x21, 0xf6, 0x19, 0x77, 0x8c, 0x81, 0x26, 0xa1, 0x23, 0x24, 0xe3, 0x34, 0x00, 0x67, 0xb8, + 0xef, 0x04, 0x10, 0x03, 0xa7, 0x12, 0x7c, 0x3b, 0xe1, 0x4c, 0x32, 0xfc, 0x5a, 0xe6, 0x66, 0xd3, + 0x24, 0xb4, 0x8d, 0x9b, 0x3d, 0xdc, 0xaf, 0xdd, 0x0b, 0x42, 0x79, 0x36, 0x38, 0xb5, 0x3d, 0x16, + 0x39, 0x01, 0x0b, 0x98, 0xa3, 0xbd, 0x4f, 0x07, 0x4f, 0xb5, 0xa4, 0x05, 0xfd, 0x2b, 0x63, 0xa9, + 0x35, 0x73, 0xc1, 0x3c, 0xc6, 0x8b, 0x22, 0xd5, 0x1e, 0x4c, 0x7c, 0x22, 0xea, 0x9d, 0x85, 0x31, + 0xf0, 0x73, 0x27, 0xe9, 0x05, 0x4a, 0x21, 0x9c, 0x08, 0x24, 0x2d, 0x42, 0x39, 0xf3, 0x50, 0x7c, + 0x10, 0xcb, 0x30, 0x82, 0x19, 0xc0, 0x87, 0x2f, 0x02, 0x08, 0xef, 0x0c, 0x22, 0x3a, 0x8d, 0x6b, + 0xfe, 0xb2, 0x86, 0x36, 0x8f, 0xb3, 0x06, 0x7c, 0xda, 0xa7, 0x42, 0xe0, 0x6f, 0x51, 0x59, 0x25, + 0xe5, 0x53, 0x49, 0xab, 0xd6, 0x9e, 0xd5, 0xaa, 0x1c, 0xdc, 0xb7, 0x27, 0xcd, 0x1a, 0x73, 0xdb, + 0x49, 0x2f, 0x50, 0x0a, 0x61, 0x2b, 0x6f, 0x7b, 0xb8, 0x6f, 0x3f, 0x3e, 0xfd, 0x0e, 0x3c, 0xd9, + 0x05, 0x49, 0x5d, 0x7c, 0x31, 0x6a, 0x2c, 0xa5, 0xa3, 0x06, 0x9a, 0xe8, 0xc8, 0x98, 0x15, 0x7f, + 0x80, 0x2a, 0x09, 0x67, 0xc3, 0x50, 0x84, 0x2c, 0x06, 0x5e, 0x5d, 0xde, 0xb3, 0x5a, 0x1b, 0xee, + 0xab, 0x06, 0x52, 0x39, 0x9a, 0x98, 0x48, 0xde, 0x0f, 0x07, 0x08, 0x25, 0x94, 0xd3, 0x08, 0x24, + 0x70, 0x51, 0x2d, 0xed, 0x95, 0x5a, 0x95, 0x83, 0xf7, 0xed, 0xc2, 0x39, 0xda, 0xf9, 0x8a, 0xec, + 0xa3, 0x31, 0xaa, 0x13, 0x4b, 0x7e, 0x3e, 0xc9, 0x6e, 0x62, 0x20, 0x39, 0x6a, 0xdc, 0x43, 0x5b, + 0x1c, 0xbc, 0x3e, 0x0d, 0xa3, 0x23, 0xd6, 0x0f, 0xbd, 0xf3, 0xea, 0x8a, 0xce, 0xb0, 0x93, 0x8e, + 0x1a, 0x5b, 0x24, 0x6f, 0xb8, 0x1a, 0x35, 0xee, 0xcf, 0x6e, 0x80, 0x7d, 0x04, 0x5c, 0x84, 0x42, + 0x42, 0x2c, 0x9f, 0xb0, 0xfe, 0x20, 0x82, 0x1b, 0x18, 0x72, 0x93, 0x1b, 0x3f, 0x40, 0x9b, 0x11, + 0x1b, 0xc4, 0xf2, 0x71, 0x22, 0x43, 0x16, 0x8b, 0xea, 0xea, 0x5e, 0xa9, 0xb5, 
0xe1, 0x6e, 0xa7, + 0xa3, 0xc6, 0x66, 0x37, 0xa7, 0x27, 0x37, 0xbc, 0xf0, 0x21, 0xda, 0xa5, 0xfd, 0x3e, 0xfb, 0x21, + 0x0b, 0xd0, 0xf9, 0x31, 0xa1, 0xb1, 0xea, 0x52, 0x75, 0x6d, 0xcf, 0x6a, 0x95, 0xdd, 0x6a, 0x3a, + 0x6a, 0xec, 0xb6, 0x0b, 0xec, 0xa4, 0x10, 0x85, 0xbf, 0x42, 0x3b, 0x43, 0xad, 0x72, 0xc3, 0xd8, + 0x0f, 0xe3, 0xa0, 0xcb, 0x7c, 0xa8, 0xae, 0xeb, 0xa2, 0xef, 0xa6, 0xa3, 0xc6, 0xce, 0x93, 0x69, + 0xe3, 0x55, 0x91, 0x92, 0xcc, 0x92, 0xe0, 0xef, 0xd1, 0x8e, 0x8e, 0x08, 0xfe, 0x09, 0x4b, 0x58, + 0x9f, 0x05, 0x21, 0x88, 0x6a, 0x59, 0x8f, 0xae, 0x95, 0x1f, 0x9d, 0x6a, 0x9d, 0x9a, 0x9b, 0xf1, + 0x3a, 0x3f, 0x86, 0x3e, 0x78, 0x92, 0xf1, 0x13, 0xe0, 0x91, 0xfb, 0xa6, 0x99, 0xd7, 0x4e, 0x7b, + 0x9a, 0x8a, 0xcc, 0xb2, 0xd7, 0x3e, 0x46, 0x77, 0xa6, 0x06, 0x8e, 0xb7, 0x51, 0xa9, 0x07, 0xe7, + 0x7a, 0x9b, 0x37, 0x88, 0xfa, 0x89, 0x77, 0xd1, 0xea, 0x90, 0xf6, 0x07, 0x90, 0x2d, 0x1f, 0xc9, + 0x84, 0x8f, 0x96, 0x1f, 0x5a, 0xcd, 0x3f, 0x2c, 0xb4, 0x9d, 0xdf, 0x9e, 0xc3, 0x50, 0x48, 0xfc, + 0xcd, 0xcc, 0x4d, 0xd8, 0x8b, 0xdd, 0x84, 0x42, 0xeb, 0x8b, 0xd8, 0x36, 0x35, 0x94, 0xaf, 0x35, + 0xb9, 0x7b, 0xf8, 0x1c, 0xad, 0x86, 0x12, 0x22, 0x51, 0x5d, 0xd6, 0x8d, 0x79, 0x6b, 0x81, 0x9d, + 0x76, 0xb7, 0x0c, 0xdf, 0xea, 0x17, 0x0a, 0x49, 0x32, 0x82, 0xe6, 0xef, 0xcb, 0x68, 0x3b, 0x9b, + 0x4b, 0x5b, 0x4a, 0xea, 0x9d, 0x45, 0x10, 0xcb, 0x97, 0x70, 0xd0, 0x5d, 0xb4, 0x22, 0x12, 0xf0, + 0x74, 0x33, 0x2b, 0x07, 0xef, 0xce, 0xc9, 0x7f, 0x3a, 0xb1, 0xe3, 0x04, 0x3c, 0x77, 0xd3, 0x10, + 0xaf, 0x28, 0x89, 0x68, 0x1a, 0xfc, 0x25, 0x5a, 0x13, 0x92, 0xca, 0x81, 0x3a, 0x72, 0x45, 0x78, + 0x6f, 0x51, 0x42, 0x0d, 0x72, 0x5f, 0x31, 0x94, 0x6b, 0x99, 0x4c, 0x0c, 0x59, 0xf3, 0x4f, 0x0b, + 0xed, 0x4e, 0x43, 0x5e, 0xc2, 0x74, 0x0f, 0x6f, 0x4e, 0xf7, 0xed, 0x05, 0x8b, 0x99, 0x33, 0xe1, + 0xa7, 0xe8, 0xf5, 0x99, 0xb2, 0xd9, 0x80, 0x7b, 0xa0, 0x9e, 0x84, 0x64, 0xea, 0xe1, 0x79, 0x44, + 0x23, 0xc8, 0xb6, 0x3e, 0x7b, 0x12, 0x8e, 0x0a, 0xec, 0xa4, 0x10, 0xd5, 0xfc, 0xab, 0xa0, 0x59, + 0x6a, 0x44, 0xf8, 0x3d, 0x54, 0xa6, 0x5a, 0x03, 0xdc, 0x50, 0x8f, 0x8b, 0x6f, 0x1b, 0x3d, 0x19, + 0x7b, 0xe8, 0x51, 0xea, 0xf4, 0xcc, 0x6e, 0x2c, 0x3c, 0x4a, 0x0d, 0xca, 0x8d, 0x52, 0xcb, 0xc4, + 0x90, 0xa9, 0x24, 0x62, 0xe6, 0x67, 0xf5, 0x95, 0x6e, 0x26, 0xf1, 0xc8, 0xe8, 0xc9, 0xd8, 0xa3, + 0xf9, 0x6f, 0xa9, 0xa0, 0x69, 0x7a, 0x27, 0x72, 0xd5, 0xf8, 0xba, 0x9a, 0xf2, 0x4c, 0x35, 0xfe, + 0xb8, 0x1a, 0x1f, 0xff, 0x66, 0x21, 0x4c, 0xc7, 0x14, 0xdd, 0xeb, 0x9d, 0xc9, 0x06, 0xdb, 0xb9, + 0xd5, 0x96, 0xda, 0xed, 0x19, 0x9e, 0xec, 0xe3, 0x54, 0x33, 0xf1, 0xf1, 0xac, 0x03, 0x29, 0x08, + 0x8e, 0x7d, 0x54, 0xc9, 0xb4, 0x1d, 0xce, 0x19, 0x37, 0x17, 0xd3, 0xfc, 0xdf, 0x5c, 0xb4, 0xa7, + 0x5b, 0x57, 0x1f, 0xdb, 0xf6, 0x04, 0x7a, 0x35, 0x6a, 0x54, 0x72, 0x76, 0x92, 0xa7, 0x55, 0x51, + 0x7c, 0x98, 0x44, 0x59, 0xb9, 0x5d, 0x94, 0xcf, 0x60, 0x7e, 0x94, 0x1c, 0x6d, 0xad, 0x83, 0xde, + 0x98, 0xd3, 0x96, 0x5b, 0x3d, 0xe1, 0x3f, 0x5b, 0x28, 0x1f, 0x03, 0x1f, 0xa2, 0x15, 0xf5, 0x0f, + 0xc8, 0xdc, 0xf6, 0xdd, 0xc5, 0x6e, 0xfb, 0x24, 0x8c, 0x60, 0xf2, 0x3a, 0x29, 0x89, 0x68, 0x16, + 0xfc, 0x0e, 0x5a, 0x8f, 0x40, 0x08, 0x1a, 0x98, 0xc8, 0xee, 0x1d, 0xe3, 0xb4, 0xde, 0xcd, 0xd4, + 0xe4, 0xda, 0xee, 0xb6, 0x2e, 0x2e, 0xeb, 0x4b, 0xcf, 0x2e, 0xeb, 0x4b, 0xcf, 0x2f, 0xeb, 0x4b, + 0x3f, 0xa5, 0x75, 0xeb, 0x22, 0xad, 0x5b, 0xcf, 0xd2, 0xba, 0xf5, 0x3c, 0xad, 0x5b, 0x7f, 0xa7, + 0x75, 0xeb, 0xd7, 0x7f, 0xea, 0x4b, 0x5f, 0x2f, 0x0f, 0xf7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, + 0x85, 0x2a, 0x88, 0xc0, 0xcf, 0x0a, 0x00, 0x00, } diff --git 
a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index d1785659c02ab..668c854474369 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -31,7 +31,7 @@ option go_package = "v1"; // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { @@ -88,3 +88,99 @@ message StorageClassList { repeated StorageClass items = 2; } +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. 
+ // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. +message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + optional string message = 2; +} + diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go index c058add840011..473c687278b94 100644 --- a/vendor/k8s.io/api/storage/v1/register.go +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -46,6 +46,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, &StorageClassList{}, + + &VolumeAttachment{}, + &VolumeAttachmentList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 30e6d6d29bc80..9f2f67b6b7619 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -102,3 +102,110 @@ const ( // binding will occur during Pod scheduing. VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" ) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. 
+ Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // Placeholder for *VolumeSource to accommodate inline volumes in pods. +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. +type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 23b76e28de9f0..d4a022d52eccd 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -53,4 +53,67 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. 
Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. 
This string maybe logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 0e850dc34f02d..3157ec67812ac 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -117,3 +117,152 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. +func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. 
+func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go index aa94aff7fbb55..0056b00d975b0 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/doc.go +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1alpha1 // import "k8s.io/api/storage/v1alpha1" diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 507b5c1d5e689..0511ccabd8116 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
@@ -324,24 +323,6 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1076,51 +1057,14 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.AttachmentMetadata == nil { m.AttachmentMetadata = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1130,41 +1074,80 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } 
+ intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AttachmentMetadata[mapkey] = mapvalue - } else { - var mapvalue string - m.AttachmentMetadata[mapkey] = mapvalue } + m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index ccb947540f118..fdc4ad257d39f 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -30,7 +30,7 @@ option go_package = "v1alpha1"; // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. -// +// // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index 32d7dcc52f92b..3701b08640d53 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -49,7 +49,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "Name of the persistent volume to attach.", } diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go index 8957a4cf2455f..ea7667dda38d8 100644 --- a/vendor/k8s.io/api/storage/v1beta1/doc.go +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1beta1 // import "k8s.io/api/storage/v1beta1" diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index fed8c7a6308c1..3e995f039fc83 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. 
+// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -477,24 +476,6 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -892,51 +873,14 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Parameters == nil { m.Parameters = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -946,41 +890,80 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Parameters[mapkey] = mapvalue - } else { - var mapvalue string - m.Parameters[mapkey] = mapvalue } + m.Parameters[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -1799,51 +1782,14 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.AttachmentMetadata == nil { m.AttachmentMetadata = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1853,41 +1799,80 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return 
io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AttachmentMetadata[mapkey] = mapvalue - } else { - var mapvalue string - m.AttachmentMetadata[mapkey] = mapvalue } + m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index ecf53bef60e6c..db1f302a053a0 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -31,7 +31,7 @@ option go_package = "v1beta1"; // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { @@ -90,7 +90,7 @@ message StorageClassList { // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. -// +// // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 044d69f5855f2..834553e1a8045 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -75,7 +75,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set.", "persistentVolumeName": "Name of the persistent volume to attach.", } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go index 24e72f91e596d..8af73d2e19630 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go @@ -40,6 +40,12 @@ const ( // // CustomResourceSubresources defines the subresources for CustomResources CustomResourceSubresources utilfeature.Feature = "CustomResourceSubresources" + + // owner: @mbohlool, @roycaihw + // alpha: v1.13 + // + // CustomResourceWebhookConversion defines the webhook conversion for Custom Resources. + CustomResourceWebhookConversion utilfeature.Feature = "CustomResourceWebhookConversion" ) func init() { @@ -50,6 +56,7 @@ func init() { // To add a new feature, define a key for it above and add it here. The features will be // available throughout Kubernetes binaries. var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ - CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, - CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, + CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, + CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, + CustomResourceWebhookConversion: {Default: false, PreRelease: utilfeature.Alpha}, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index bcc032df9dd6b..48c1104d99fae 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "net/http" + "reflect" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -82,7 +83,20 @@ func (u *UnexpectedObjectError) Error() string { func FromObject(obj runtime.Object) error { switch t := obj.(type) { case *metav1.Status: - return &StatusError{*t} + return &StatusError{ErrStatus: *t} + case runtime.Unstructured: + var status metav1.Status + obj := t.UnstructuredContent() + if !reflect.DeepEqual(obj["kind"], "Status") { + break + } + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil { + return err + } + if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" { + break + } + return &StatusError{ErrStatus: status} } return &UnexpectedObjectError{obj} } @@ -327,6 +341,17 @@ func NewTooManyRequestsError(message string) *StatusError { }} } +// NewRequestEntityTooLargeError returns an error indicating that the request +// entity was too large. +func NewRequestEntityTooLargeError(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusRequestEntityTooLarge, + Reason: metav1.StatusReasonRequestEntityTooLarge, + Message: fmt.Sprintf("Request entity too large: %s", message), + }} +} + // NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. 
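The kube_features.go hunk above introduces the CustomResourceWebhookConversion gate, defaulting to false at alpha. A rough sketch of checking it, assuming the gate is registered with the apiserver's utilfeature.DefaultFeatureGate (as this package's init() normally does) and is toggled with --feature-gates=CustomResourceWebhookConversion=true:

package main

import (
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
	// Reports false unless the alpha gate has been explicitly enabled.
	enabled := utilfeature.DefaultFeatureGate.Enabled(features.CustomResourceWebhookConversion)
	fmt.Println("CustomResourceWebhookConversion enabled:", enabled)
}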
func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { reason := metav1.StatusReasonUnknown @@ -513,6 +538,19 @@ func IsTooManyRequests(err error) bool { return false } +// IsRequestEntityTooLargeError determines if err is an error which indicates +// the request entity is too large. +func IsRequestEntityTooLargeError(err error) bool { + if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge { + return true + } + switch t := err.(type) { + case APIStatus: + return t.Status().Code == http.StatusRequestEntityTooLarge + } + return false +} + // IsUnexpectedServerError returns true if the server response was not in the expected API format, // and may be the result of another HTTP actor. func IsUnexpectedServerError(err error) bool { diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel index 329569cf4ce8c..8de6f06ce89fd 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel @@ -18,7 +18,6 @@ go_library( importpath = "k8s.io/apimachinery/pkg/api/meta", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", @@ -27,5 +26,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 1c2a83cfacbb2..6fe7458f6c4ff 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -20,7 +20,7 @@ import ( "fmt" "reflect" - "github.com/golang/glog" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" @@ -132,12 +132,12 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata CreationTimestamp: m.GetCreationTimestamp(), DeletionTimestamp: m.GetDeletionTimestamp(), DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(), - Labels: m.GetLabels(), - Annotations: m.GetAnnotations(), - OwnerReferences: m.GetOwnerReferences(), - Finalizers: m.GetFinalizers(), - ClusterName: m.GetClusterName(), - Initializers: m.GetInitializers(), + Labels: m.GetLabels(), + Annotations: m.GetAnnotations(), + OwnerReferences: m.GetOwnerReferences(), + Finalizers: m.GetFinalizers(), + ClusterName: m.GetClusterName(), + Initializers: m.GetInitializers(), }, } } @@ -607,7 +607,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { var ret []metav1.OwnerReference s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) + klog.Errorf("expect %v to be a pointer to slice", s) return ret } s = s.Elem() @@ -615,7 +615,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) for i := 0; i < s.Len(); i++ { if err := extractFromOwnerReference(s.Index(i), &ret[i]); err 
!= nil { - glog.Errorf("extractFromOwnerReference failed: %v", err) + klog.Errorf("extractFromOwnerReference failed: %v", err) return ret } } @@ -625,13 +625,13 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) + klog.Errorf("expect %v to be a pointer to slice", s) } s = s.Elem() newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) for i := 0; i < len(references); i++ { if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { - glog.Errorf("setOwnerReference failed: %v", err) + klog.Errorf("setOwnerReference failed: %v", err) return } } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 802f22a63f37c..9d7835bc23f96 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto -// DO NOT EDIT! /* Package resource is a generated protocol buffer package. diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 2c615d51bf20b..acc9044452287 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -27,9 +27,9 @@ option go_package = "resource"; // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, // in addition to String() and Int64() accessors. -// +// // The serialization format is: -// +// // ::= // (Note that may be empty, from the "" case in .) // ::= 0 | 1 | ... | 9 @@ -43,16 +43,16 @@ option go_package = "resource"; // ::= m | "" | k | M | G | T | P | E // (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) // ::= "e" | "E" -// +// // No matter which of the three exponent forms is used, no quantity may represent // a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal // places. Numbers larger or more precise will be capped or rounded up. // (E.g.: 0.1m will rounded up to 1m.) // This may be extended in the future if we require larger or smaller quantities. -// +// // When a Quantity is parsed from a string, it will remember the type of suffix // it had, and will use the same type again when it is serialized. -// +// // Before serializing, Quantity will be put in "canonical form". // This means that Exponent/suffix will be adjusted up or down (with a // corresponding increase or decrease in Mantissa) such that: @@ -60,22 +60,22 @@ option go_package = "resource"; // b. No fractional digits will be emitted // c. The exponent (or suffix) is as large as possible. // The sign will be omitted unless the number is negative. -// +// // Examples: // 1.5 will be serialized as "1500m" // 1.5Gi will be serialized as "1536Mi" -// +// // Note that the quantity will NEVER be internally represented by a // floating point number. That is the whole point of this exercise. 
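The Quantity format description above (it continues below with notes on non-canonical input) gives two concrete canonicalization examples. A quick sketch exercising exactly those two documented cases with the resource package's parser; MustParse panics on malformed input, so it is only appropriate for literals like these:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Canonical form per the comment above: no fractional digits,
	// largest possible suffix.
	fmt.Println(resource.MustParse("1.5").String())   // 1500m
	fmt.Println(resource.MustParse("1.5Gi").String()) // 1536Mi
}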
-// +// // Non-canonical values will still parse as long as they are well formed, // but will be re-emitted in their canonical form. (So always use canonical // form, or don't diff.) -// +// // This format is intended to make it difficult to use these numbers without // writing some sort of special handling code in the hopes that that will // cause implementors to also use a fixed point implementation. -// +// // +protobuf=true // +protobuf.embed=string // +protobuf.options.marshal=false diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index 61f201cdf5d94..dbaa87c879fc1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io + package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index b7508f033e355..81320c9c88272 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -1768,24 +1767,6 @@ func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -5043,51 +5024,14 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.MatchLabels == nil { m.MatchLabels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var 
mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5097,41 +5041,80 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.MatchLabels[mapkey] = mapvalue - } else { - var mapvalue string - m.MatchLabels[mapkey] = mapvalue } + m.MatchLabels[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -6146,51 +6129,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey 
- if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6200,41 +6146,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 12: if wireType != 2 { @@ -6262,51 +6247,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6316,41 +6264,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 13: if wireType != 2 { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index eb3237f2b7179..989f076a1031d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ 
b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -107,7 +107,7 @@ message APIResourceList { // APIVersions lists the versions that are available, to allow clients to // discover the API at /api, which is the root path of the legacy v1 API. -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message APIVersions { @@ -211,7 +211,7 @@ message GetOptions { // GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying // concepts during lookup stages without having partially valid types -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupKind { optional string group = 1; @@ -221,7 +221,7 @@ message GroupKind { // GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying // concepts during lookup stages without having partially valid types -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupResource { optional string group = 1; @@ -230,7 +230,7 @@ message GroupResource { } // GroupVersion contains the "group" and the "version", which uniquely identifies the API. -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersion { optional string group = 1; @@ -251,7 +251,7 @@ message GroupVersionForDiscovery { // GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion // to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionKind { optional string group = 1; @@ -263,7 +263,7 @@ message GroupVersionKind { // GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion // to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionResource { optional string group = 1; @@ -411,7 +411,7 @@ message ListOptions { // more results are available. Servers may choose not to support the limit argument and will return // all of the available results. If limit is specified and the continue field is empty, clients may // assume that no more results are available. This field is not supported if watch is true. - // + // // The server guarantees that the objects returned when using continue will be identical to issuing // a single list call without a limit - that is, no objects created, modified, or deleted after the // first request is issued will be included in any subsequent continued requests. This is sometimes @@ -432,14 +432,14 @@ message ListOptions { // a list starting from the next key, but from the latest snapshot, which is inconsistent from the // previous list results - objects that are created, modified, or deleted after the first list request // will be included in the response, as long as their keys are after the "next key". - // + // // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. optional string continue = 8; } // MicroTime is version of Time with microsecond level precision. 
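The limit/continue documentation above describes the list pagination contract: keep passing back the returned continue token until it comes back empty. A rough illustration of the client-side loop that contract implies; listPage and the fake pager are stand-ins invented for this sketch, real callers would use a generated client's List method:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listPage stands in for any List call that honors ListOptions.
type listPage func(ctx context.Context, opts metav1.ListOptions) (items []string, continueToken string, err error)

// listAll drains a paginated collection using Limit and Continue.
func listAll(ctx context.Context, list listPage) ([]string, error) {
	var all []string
	opts := metav1.ListOptions{Limit: 500}
	for {
		items, next, err := list(ctx, opts)
		if err != nil {
			return nil, err
		}
		all = append(all, items...)
		if next == "" {
			return all, nil
		}
		opts.Continue = next
	}
}

func main() {
	// Two fake pages to exercise the loop.
	pages := [][]string{{"a", "b"}, {"c"}}
	i := 0
	fake := func(ctx context.Context, opts metav1.ListOptions) ([]string, string, error) {
		items := pages[i]
		i++
		if i < len(pages) {
			return items, "next-token", nil
		}
		return items, "", nil
	}
	out, _ := listAll(context.Background(), fake)
	fmt.Println(out) // [a b c]
}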
-// +// // +protobuf.options.marshal=false // +protobuf.as=Timestamp // +protobuf.options.(gogoproto.goproto_stringer)=false @@ -475,12 +475,12 @@ message ObjectMeta { // The provided value has the same validation rules as the Name field, // and may be truncated by the length of the suffix required to make the value // unique on the server. - // + // // If this field is specified and the generated name exists, the server will // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason // ServerTimeout indicating a unique name could not be found in the time allotted, and the client // should retry (optionally after the time indicated in the Retry-After header). - // + // // Applied only if Name is not specified. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency // +optional @@ -490,7 +490,7 @@ message ObjectMeta { // equivalent to the "default" namespace, but "default" is the canonical representation. // Not all objects are required to be scoped to a namespace - the value of this field for // those objects will be empty. - // + // // Must be a DNS_LABEL. // Cannot be updated. // More info: http://kubernetes.io/docs/user-guide/namespaces @@ -506,7 +506,7 @@ message ObjectMeta { // UID is the unique in time and space value for this object. It is typically generated by // the server on successful creation of a resource and is not allowed to change on PUT // operations. - // + // // Populated by the system. // Read-only. // More info: http://kubernetes.io/docs/user-guide/identifiers#uids @@ -518,7 +518,7 @@ message ObjectMeta { // concurrency, change detection, and the watch operation on a resource or set of resources. // Clients must treat these values as opaque and passed unmodified back to the server. // They may only be valid for a particular resource or set of resources. - // + // // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . @@ -534,7 +534,7 @@ message ObjectMeta { // CreationTimestamp is a timestamp representing the server time when this object was // created. It is not guaranteed to be set in happens-before order across separate operations. // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // + // // Populated by the system. // Read-only. // Null for lists. @@ -556,7 +556,7 @@ message ObjectMeta { // exist after this timestamp, until an administrator or automated process can determine the // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. - // + // // Populated by the system when a graceful deletion is requested. // Read-only. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata @@ -598,7 +598,7 @@ message ObjectMeta { // this object has been completely initialized. Otherwise, the object is considered uninitialized // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to // observe uninitialized objects. - // + // // When an object is created, the system will populate this list with the current set of initializers. // Only privileged users may set or modify this list. Once it is empty, it may not be modified further // by any user. @@ -620,8 +620,8 @@ message ObjectMeta { } // OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. +// object. 
An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. message OwnerReference { // API version of the referent. optional string apiVersion = 5; @@ -734,7 +734,7 @@ message StatusCause { // Arrays are zero-indexed. Fields may appear more than once in an array of // causes due to fields having multiple errors. // Optional. - // + // // Examples: // "name" - the field "name" on the current resource // "items[0].name" - the field "name" on the first array entry in "items" @@ -785,7 +785,7 @@ message StatusDetails { // Time is a wrapper around time.Time which supports correct // marshaling to YAML and JSON. Wrappers are provided for many // of the factory methods that the time package offers. -// +// // +protobuf.options.marshal=false // +protobuf.as=Timestamp // +protobuf.options.(gogoproto.goproto_stringer)=false @@ -821,7 +821,7 @@ message Timestamp { // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. -// +// // +k8s:deepcopy-gen=false message TypeMeta { // Kind is a string value representing the REST resource this object represents. @@ -852,7 +852,7 @@ message UpdateOptions { } // Verbs masks the value so protobuf can generate -// +// // +protobuf.nullable=true // +protobuf.options.(gogoproto.goproto_stringer)=false message Verbs { @@ -862,7 +862,7 @@ message Verbs { } // Event represents a single event to a watched resource. -// +// // +protobuf=true // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 4d3a55d716974..65f87546d20e5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -286,8 +286,8 @@ const ( ) // OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. +// object. An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` @@ -713,6 +713,10 @@ const ( // Status code 406 StatusReasonNotAcceptable StatusReason = "NotAcceptable" + // StatusReasonRequestEntityTooLarge means that the request entity is too large. + // Status code 413 + StatusReasonRequestEntityTooLarge StatusReason = "RequestEntityTooLarge" + // StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable // to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml. // API calls that return UnsupportedMediaType can never succeed. 
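The StatusReasonRequestEntityTooLarge constant added here pairs with the NewRequestEntityTooLargeError and IsRequestEntityTooLargeError helpers added to the errors package earlier in this diff. A small sketch of how a caller might round-trip them; the apierrors alias is just a local naming choice:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

func main() {
	// Build the new 413 error and check it with the matching predicate.
	err := apierrors.NewRequestEntityTooLargeError("patch is larger than the server limit")
	fmt.Println(apierrors.IsRequestEntityTooLargeError(err)) // true
	fmt.Println(apierrors.ReasonForError(err))               // RequestEntityTooLarge
}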
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 35e800f8a4889..679e709e8e39c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -240,7 +240,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string { } var map_OwnerReference = map[string]string{ - "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", + "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go index dc461cc29682f..46b0e133c37c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io + package v1beta1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index fe3df6916b08f..47c03d69b9641 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
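The reworded OwnerReference comment (in types.go above and the swagger doc just shown) now explicitly allows cluster-scoped owners. A sketch of such a reference on a namespaced dependent; the Node name and UID are invented for illustration:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	controller := true
	// The owner is cluster-scoped (a Node here), so the reference carries no
	// namespace; the dependent simply lives in its own namespace.
	ref := metav1.OwnerReference{
		APIVersion: "v1",
		Kind:       "Node",
		Name:       "worker-1",               // hypothetical node name
		UID:        types.UID("uid-of-node"), // hypothetical UID
		Controller: &controller,
	}
	fmt.Printf("owned by %s %s (uid %s)\n", ref.Kind, ref.Name, ref.UID)
}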
@@ -148,24 +147,6 @@ func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel index fb063a89079c9..5553abcef4483 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel @@ -12,9 +12,9 @@ go_library( importpath = "k8s.io/apimachinery/pkg/labels", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 374d2ef1377a8..f5a0888932f26 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -23,10 +23,10 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog" ) // Requirements is AND of all requirements. @@ -211,13 +211,13 @@ func (r *Requirement) Matches(ls Labels) bool { } lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) return false } // There should be only one strValue in r.strValues, and can be converted to a integer. 
if len(r.strValues) != 1 { - glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) return false } @@ -225,7 +225,7 @@ func (r *Requirement) Matches(ls Labels) bool { for i := range r.strValues { rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) return false } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel index 59e05436505bb..8ad7d28ce1ee4 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel @@ -27,7 +27,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion/queryparams:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -36,5 +35,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/naming:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index 6b859b288978e..284e32bc3cb8e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -283,6 +283,7 @@ var _ GroupVersioner = multiGroupVersioner{} type multiGroupVersioner struct { target schema.GroupVersion acceptedGroupKinds []schema.GroupKind + coerce bool } // NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds. @@ -294,6 +295,22 @@ func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKi return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds} } +// NewCoercingMultiGroupVersioner returns the provided group version for any incoming kind. +// Incoming kinds that match the provided groupKinds are preferred. +// Kind may be empty in the provided group kind, in which case any kind will match. 
+// Examples: +// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list) +func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { + return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true} +} + // KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will // use the originating kind where possible. func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { @@ -308,5 +325,8 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio return v.target.WithKind(src.Kind), true } } + if v.coerce && len(kinds) > 0 { + return v.target.WithKind(kinds[0].Kind), true + } return schema.GroupVersionKind{}, false } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 291d7a4e888cb..dff56e03401a6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "github.com/golang/glog" + "k8s.io/klog" ) // UnstructuredConverter is an interface for converting between interface{} @@ -133,10 +133,10 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i newObj := reflect.New(t.Elem()).Interface() newErr := fromUnstructuredViaJSON(u, newObj) if (err != nil) != (newErr != nil) { - glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) + klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) } if err == nil && !c.comparison.DeepEqual(obj, newObj) { - glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) + klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } return err @@ -424,10 +424,10 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte newUnstr := map[string]interface{}{} newErr := toUnstructuredViaJSON(obj, &newUnstr) if (err != nil) != (newErr != nil) { - glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) + klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) } if err == nil && !c.comparison.DeepEqual(u, newUnstr) { - glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) + klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) } } if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 967e0f530a32f..9b15989c82700 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the 
License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto -// DO NOT EDIT! /* Package runtime is a generated protocol buffer package. @@ -158,24 +157,6 @@ func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index fb61ac96a9300..0e212ec941fff 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -25,11 +25,11 @@ package k8s.io.apimachinery.pkg.runtime; option go_package = "runtime"; // RawExtension is used to hold extensions in external versions. -// +// // To use this, make a field which has RawExtension as its type in your external, versioned // struct, and Object in your internal struct. You also need to register your // various plugin types. -// +// // // Internal package: // type MyAPIObject struct { // runtime.TypeMeta `json:",inline"` @@ -38,7 +38,7 @@ option go_package = "runtime"; // type PluginA struct { // AOption string `json:"aOption"` // } -// +// // // External package: // type MyAPIObject struct { // runtime.TypeMeta `json:",inline"` @@ -47,7 +47,7 @@ option go_package = "runtime"; // type PluginA struct { // AOption string `json:"aOption"` // } -// +// // // On the wire, the JSON will look something like this: // { // "kind":"MyAPIObject", @@ -57,7 +57,7 @@ option go_package = "runtime"; // "aOption":"foo", // }, // } -// +// // So what happens? Decode first uses json or yaml to unmarshal the serialized data into // your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. // The next step is to copy (using pkg/conversion) into the internal struct. The runtime @@ -65,13 +65,13 @@ option go_package = "runtime"; // JSON stored in RawExtension, turning it into the correct object type, and storing it // in the Object. (TODO: In the case where the object is of an unknown type, a // runtime.Unknown object will be created and stored.) -// +// // +k8s:deepcopy-gen=true // +protobuf=true // +k8s:openapi-gen=true message RawExtension { // Raw is the underlying serialization of this object. - // + // // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. optional bytes raw = 1; } @@ -83,10 +83,10 @@ message RawExtension { // ... // other fields // } // func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// +// // TypeMeta is provided here for convenience. You may use it directly from this package or define // your own with the same fields. 
-// +// // +k8s:deepcopy-gen=false // +protobuf=true // +k8s:openapi-gen=true @@ -103,7 +103,7 @@ message TypeMeta { // TypeMeta features-- kind, version, etc. // TODO: Make this object have easy access to field based accessors and settors for // metadata and field mutatation. -// +// // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +protobuf=true diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 5c9934c7399aa..28a61d5fb574e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto -// DO NOT EDIT! /* Package schema is a generated protocol buffer package. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 5f02961d326bc..4c67ed59801be 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -66,7 +66,7 @@ func (gr GroupResource) Empty() bool { return len(gr.Group) == 0 && len(gr.Resource) == 0 } -func (gr *GroupResource) String() string { +func (gr GroupResource) String() string { if len(gr.Group) == 0 { return gr.Resource } @@ -111,7 +111,7 @@ func (gvr GroupVersionResource) GroupVersion() GroupVersion { return GroupVersion{Group: gvr.Group, Version: gvr.Version} } -func (gvr *GroupVersionResource) String() string { +func (gvr GroupVersionResource) String() string { return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") } @@ -130,7 +130,7 @@ func (gk GroupKind) WithVersion(version string) GroupVersionKind { return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} } -func (gk *GroupKind) String() string { +func (gk GroupKind) String() string { if len(gk.Group) == 0 { return gk.Kind } @@ -281,8 +281,8 @@ func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersio // ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that // do not use TypeMeta. 
-func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { - if gvk == nil { +func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk.Empty() { return "", "" } return gvk.GroupVersion().String(), gvk.Kind diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel index 89052b5022c53..843b8718a46db 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel @@ -10,7 +10,6 @@ go_library( importpath = "k8s.io/apimachinery/pkg/runtime/serializer/json", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/json-iterator/go:go_default_library", "//vendor/github.com/modern-go/reflect2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -18,5 +17,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/framer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 382c4858e7fbe..8987e74c680e3 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -22,9 +22,9 @@ import ( "strconv" "unsafe" - "github.com/ghodss/yaml" jsoniter "github.com/json-iterator/go" "github.com/modern-go/reflect2" + "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index 91fd4ed4f0b37..a60a7c04156be 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -64,7 +64,7 @@ func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder { reader: r, decoder: d, buf: make([]byte, 1024), - maxBytes: 1024 * 1024, + maxBytes: 16 * 1024 * 1024, } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index a5ae3ac4bb7f7..00184710760b6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -18,6 +18,7 @@ package versioning import ( "io" + "reflect" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -90,7 +91,16 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru into = versioned.Last() } - obj, gvk, err := c.decoder.Decode(data, defaultGVK, into) + // If the into object is unstructured and expresses an opinion about its group/version, + // create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`) + decodeInto := into + if into != nil { + if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() { + decodeInto = 
reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object) + } + } + + obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto) if err != nil { return nil, gvk, err } diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go index 7c9b791d4ff0a..50d9a366f368c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -136,12 +136,12 @@ func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []strin negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols) if len(negotiatedProtocol) == 0 { - w.WriteHeader(http.StatusForbidden) for i := range serverProtocols { w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i]) } - fmt.Fprintf(w, "unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols) - return "", fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server supports %v", clientProtocols, serverProtocols) + err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols) + http.Error(w, err.Error(), http.StatusForbidden) + return "", err } w.Header().Add(HeaderProtocolVersion, negotiatedProtocol) diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD.bazel index 8c9190e43a756..a3307efedc074 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD.bazel @@ -12,7 +12,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/docker/spdystream:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -21,5 +20,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/third_party/forked/golang/netutil:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go index 3dc8e23ae1418..9d222faa898fa 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go @@ -23,8 +23,8 @@ import ( "time" "github.com/docker/spdystream" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog" ) // connection maintains state about a spdystream.Connection and its associated @@ -128,7 +128,7 @@ func (c *connection) newSpdyStream(stream *spdystream.Stream) { err := c.newStreamHandler(stream, replySent) rejectStream := (err != nil) if rejectStream { - glog.Warningf("Stream rejected: %v", err) + klog.Warningf("Stream rejected: %v", err) stream.Reset() return } diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go index 13353988faab0..045d214d2b758 100644 --- 
a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go @@ -74,15 +74,15 @@ func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Reque connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection)) upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade)) if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "unable to upgrade: missing upgrade headers in request: %#v", req.Header) + errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header) + http.Error(w, errorMsg, http.StatusBadRequest) return nil } hijacker, ok := w.(http.Hijacker) if !ok { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "unable to upgrade: unable to hijack response") + errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response") + http.Error(w, errorMsg, http.StatusInternalServerError) return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel index 6c2353f6962e6..8bd8aac055814 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel @@ -11,7 +11,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 5c2ac4f23fde8..48dd7d9c551d0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto -// DO NOT EDIT! /* Package intstr is a generated protocol buffer package. 
@@ -81,24 +80,6 @@ func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto index 1c3ec732e7c3c..e79fb9e57266b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -29,7 +29,7 @@ option go_package = "intstr"; // inner type. This allows you to have, for example, a JSON field that can // accept a name or number. // TODO: Rename to Int32OrString -// +// // +protobuf=true // +protobuf.options.(gogoproto.goproto_stringer)=false // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 642b83cec2173..5b26ed262631b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,8 +25,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "github.com/google/gofuzz" + "k8s.io/klog" ) // IntOrString is a type that can hold an int32 or a string. When used in @@ -58,7 +58,7 @@ const ( // TODO: convert to (val int32) func FromInt(val int) IntOrString { if val > math.MaxInt32 || val < math.MinInt32 { - glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) } return IntOrString{Type: Int, IntVal: int32(val)} } diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel index cb074fdb2fa81..9fb1d0e6bb898 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel @@ -11,6 +11,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index d09a939be30eb..990fa0d43a64f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -21,7 +21,7 @@ import ( "reflect" "github.com/davecgh/go-spew/spew" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) // PreconditionFunc asserts that an incompatible change is not present within a patch. 
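A minimal sketch (not part of the patch) of why the mergepatch and serializer hunks above can swap github.com/ghodss/yaml for sigs.k8s.io/yaml with only an import-path change: both packages expose the same Marshal/Unmarshal helpers that round-trip through JSON struct tags. The Config type and values below are hypothetical, for illustration only.

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// Config is a hypothetical type; the YAML keys follow the json tags,
// exactly as they did with github.com/ghodss/yaml.
type Config struct {
	Name     string `json:"name"`
	Replicas int    `json:"replicas,omitempty"`
}

func main() {
	in := Config{Name: "demo", Replicas: 3}

	// Marshal goes struct -> JSON -> YAML, so the json tags control the keys.
	out, err := yaml.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)

	// Unmarshal is the reverse: YAML -> JSON -> struct.
	var back Config
	if err := yaml.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", back)
}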
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel index 18455b618d4d1..fe3081f65b209 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel @@ -13,8 +13,8 @@ go_library( importpath = "k8s.io/apimachinery/pkg/util/net", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 7c2a5e6286d1c..155667cdfc7f7 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -31,8 +31,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "golang.org/x/net/http2" + "k8s.io/klog" ) // JoinPreservingTrailingSlash does a path.Join of the specified elements, @@ -107,10 +107,10 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { t = SetOldTransportDefaults(t) // Allow clients to disable http2 if needed. if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { - glog.Infof("HTTP2 has been explicitly disabled") + klog.Infof("HTTP2 has been explicitly disabled") } else { if err := http2.ConfigureTransport(t); err != nil { - glog.Warningf("Transport failed http2 configuration: %v", err) + klog.Warningf("Transport failed http2 configuration: %v", err) } } return t @@ -368,7 +368,7 @@ redirectLoop: resp, err := http.ReadResponse(respReader, nil) if err != nil { // Unable to read the backend response; let the client handle it. - glog.Warningf("Error reading backend response: %v", err) + klog.Warningf("Error reading backend response: %v", err) break redirectLoop } diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index 0ab9b36080b5c..daf5d24964559 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -26,7 +26,7 @@ import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" ) type AddressFamily uint @@ -193,7 +193,7 @@ func isInterfaceUp(intf *net.Interface) bool { return false } if intf.Flags&net.FlagUp != 0 { - glog.V(4).Infof("Interface %v is up", intf.Name) + klog.V(4).Infof("Interface %v is up", intf.Name) return true } return false @@ -208,20 +208,20 @@ func isLoopbackOrPointToPoint(intf *net.Interface) bool { func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { if len(addrs) > 0 { for i := range addrs { - glog.V(4).Infof("Checking addr %s.", addrs[i].String()) + klog.V(4).Infof("Checking addr %s.", addrs[i].String()) ip, _, err := net.ParseCIDR(addrs[i].String()) if err != nil { return nil, err } if memberOf(ip, family) { if ip.IsGlobalUnicast() { - glog.V(4).Infof("IP found %v", ip) + klog.V(4).Infof("IP found %v", ip) return ip, nil } else { - glog.V(4).Infof("Non-global unicast address found %v", ip) + klog.V(4).Infof("Non-global unicast address found %v", ip) } } else { - glog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) + klog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) } } @@ -241,13 +241,13 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte if err != nil { return nil, err } - glog.V(4).Infof("Interface %q has %d 
addresses :%v.", intfName, len(addrs), addrs) + klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) matchingIP, err := getMatchingGlobalIP(addrs, forFamily) if err != nil { return nil, err } if matchingIP != nil { - glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) return matchingIP, nil } } @@ -275,14 +275,14 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, fmt.Errorf("no interfaces found on host.") } for _, family := range []AddressFamily{familyIPv4, familyIPv6} { - glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) for _, intf := range intfs { if !isInterfaceUp(&intf) { - glog.V(4).Infof("Skipping: down interface %q", intf.Name) + klog.V(4).Infof("Skipping: down interface %q", intf.Name) continue } if isLoopbackOrPointToPoint(&intf) { - glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) continue } addrs, err := nw.Addrs(&intf) @@ -290,7 +290,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, err } if len(addrs) == 0 { - glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) continue } for _, addr := range addrs { @@ -299,15 +299,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) } if !memberOf(ip, family) { - glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) continue } // TODO: Decide if should open up to allow IPv6 LLAs in future. if !ip.IsGlobalUnicast() { - glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) continue } - glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) return ip, nil } } @@ -381,23 +381,23 @@ func getAllDefaultRoutes() ([]Route, error) { // an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. 
func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { for _, family := range []AddressFamily{familyIPv4, familyIPv6} { - glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) + klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) for _, route := range routes { if route.Family != family { continue } - glog.V(4).Infof("Default route transits interface %q", route.Interface) + klog.V(4).Infof("Default route transits interface %q", route.Interface) finalIP, err := getIPFromInterface(route.Interface, family, nw) if err != nil { return nil, err } if finalIP != nil { - glog.V(4).Infof("Found active IP %v ", finalIP) + klog.V(4).Infof("Found active IP %v ", finalIP) return finalIP, nil } } } - glog.V(4).Infof("No active IP found by looking at default routes") + klog.V(4).Infof("No active IP found by looking at default routes") return nil, fmt.Errorf("unable to select an IP from default routes.") } diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD.bazel index 5fed595f00cb3..67674fcd57818 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD.bazel @@ -12,7 +12,6 @@ go_library( importpath = "k8s.io/apimachinery/pkg/util/proxy", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/mxk/go-flowrate/flowrate:go_default_library", "//vendor/golang.org/x/net/html:go_default_library", "//vendor/golang.org/x/net/html/atom:go_default_library", @@ -22,5 +21,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/third_party/forked/golang/netutil:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go index 37a5be487c0b9..a59b24c8dc3ec 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go @@ -24,7 +24,7 @@ import ( "net/http" "net/url" - "github.com/golang/glog" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/third_party/forked/golang/netutil" @@ -35,7 +35,7 @@ func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (ne dialer, err := utilnet.DialerFor(transport) if err != nil { - glog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err) + klog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err) } switch url.Scheme { @@ -52,7 +52,7 @@ func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (ne var err error tlsConfig, err = utilnet.TLSClientConfig(transport) if err != nil { - glog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err) + klog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err) } if dialer != nil { @@ -64,7 +64,7 @@ func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (ne } if tlsConfig == nil { // tls.Client requires non-nil config - glog.Warningf("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify") + klog.Warningf("using custom dialer with no TLSClientConfig. 
Defaulting to InsecureSkipVerify") // tls.Handshake() requires ServerName or InsecureSkipVerify tlsConfig = &tls.Config{ InsecureSkipVerify: true, diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go index 6c34ab5241de9..3c8cf6da73772 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go @@ -27,9 +27,9 @@ import ( "path" "strings" - "github.com/golang/glog" "golang.org/x/net/html" "golang.org/x/net/html/atom" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/sets" @@ -236,7 +236,7 @@ func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*ht // This is fine default: // Some encoding we don't understand-- don't try to parse this - glog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding) + klog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding) return resp, nil } @@ -245,7 +245,7 @@ func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*ht } err := rewriteHTML(reader, writer, urlRewriter) if err != nil { - glog.Errorf("Failed to rewrite URLs: %v", err) + klog.Errorf("Failed to rewrite URLs: %v", err) return resp, err } diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go index 4593c20fdc3ff..3c8e09399f512 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go @@ -35,8 +35,8 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "github.com/golang/glog" "github.com/mxk/go-flowrate/flowrate" + "k8s.io/klog" ) // UpgradeRequestRoundTripper provides an additional method to decorate a request @@ -236,7 +236,7 @@ func (h *UpgradeAwareHandler) ServeHTTP(w http.ResponseWriter, req *http.Request // tryUpgrade returns true if the request was handled. func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool { if !httpstream.IsUpgradeRequest(req) { - glog.V(6).Infof("Request was not an upgrade") + klog.V(6).Infof("Request was not an upgrade") return false } @@ -258,24 +258,24 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques // handles this in the non-upgrade path. 
utilnet.AppendForwardedForHeader(clone) if h.InterceptRedirects { - glog.V(6).Infof("Connecting to backend proxy (intercepting redirects) %s\n Headers: %v", &location, clone.Header) + klog.V(6).Infof("Connecting to backend proxy (intercepting redirects) %s\n Headers: %v", &location, clone.Header) backendConn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, &location, clone.Header, req.Body, utilnet.DialerFunc(h.DialForUpgrade), h.RequireSameHostRedirects) } else { - glog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) + klog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) clone.URL = &location backendConn, err = h.DialForUpgrade(clone) } if err != nil { - glog.V(6).Infof("Proxy connection error: %v", err) + klog.V(6).Infof("Proxy connection error: %v", err) h.Responder.Error(w, req, err) return true } defer backendConn.Close() // determine the http response code from the backend by reading from rawResponse+backendConn - rawResponseCode, headerBytes, err := getResponseCode(io.MultiReader(bytes.NewReader(rawResponse), backendConn)) + backendHTTPResponse, headerBytes, err := getResponse(io.MultiReader(bytes.NewReader(rawResponse), backendConn)) if err != nil { - glog.V(6).Infof("Proxy connection error: %v", err) + klog.V(6).Infof("Proxy connection error: %v", err) h.Responder.Error(w, req, err) return true } @@ -288,37 +288,42 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques // hijacking should be the last step in the upgrade. requestHijacker, ok := w.(http.Hijacker) if !ok { - glog.V(6).Infof("Unable to hijack response writer: %T", w) + klog.V(6).Infof("Unable to hijack response writer: %T", w) h.Responder.Error(w, req, fmt.Errorf("request connection cannot be hijacked: %T", w)) return true } requestHijackedConn, _, err := requestHijacker.Hijack() if err != nil { - glog.V(6).Infof("Unable to hijack response: %v", err) + klog.V(6).Infof("Unable to hijack response: %v", err) h.Responder.Error(w, req, fmt.Errorf("error hijacking connection: %v", err)) return true } defer requestHijackedConn.Close() + if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols { + // If the backend did not upgrade the request, echo the response from the backend to the client and return, closing the connection. + klog.V(6).Infof("Proxy upgrade error, status code %d", backendHTTPResponse.StatusCode) + // set read/write deadlines + deadline := time.Now().Add(10 * time.Second) + backendConn.SetReadDeadline(deadline) + requestHijackedConn.SetWriteDeadline(deadline) + // write the response to the client + err := backendHTTPResponse.Write(requestHijackedConn) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + klog.Errorf("Error proxying data from backend to client: %v", err) + } + // Indicate we handled the request + return true + } + // Forward raw response bytes back to client. if len(rawResponse) > 0 { - glog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) + klog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) if _, err = requestHijackedConn.Write(rawResponse); err != nil { utilruntime.HandleError(fmt.Errorf("Error proxying response from backend to client: %v", err)) } } - if rawResponseCode != http.StatusSwitchingProtocols { - // If the backend did not upgrade the request, finish echoing the response from the backend to the client and return, closing the connection. 
- glog.V(6).Infof("Proxy upgrade error, status code %d", rawResponseCode) - _, err := io.Copy(requestHijackedConn, backendConn) - if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - glog.Errorf("Error proxying data from backend to client: %v", err) - } - // Indicate we handled the request - return true - } - // Proxy the connection. This is bidirectional, so we need a goroutine // to copy in each direction. Once one side of the connection exits, we // exit the function which performs cleanup and in the process closes @@ -335,7 +340,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques } _, err := io.Copy(writer, requestHijackedConn) if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - glog.Errorf("Error proxying data from client to backend: %v", err) + klog.Errorf("Error proxying data from client to backend: %v", err) } close(writerComplete) }() @@ -349,7 +354,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques } _, err := io.Copy(requestHijackedConn, reader) if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - glog.Errorf("Error proxying data from backend to client: %v", err) + klog.Errorf("Error proxying data from backend to client: %v", err) } close(readerComplete) }() @@ -360,7 +365,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques case <-writerComplete: case <-readerComplete: } - glog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header) + klog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header) return true } @@ -380,17 +385,17 @@ func (h *UpgradeAwareHandler) DialForUpgrade(req *http.Request) (net.Conn, error return dial(updatedReq, h.UpgradeTransport) } -// getResponseCode reads a http response from the given reader, returns the status code, +// getResponseCode reads a http response from the given reader, returns the response, // the bytes read from the reader, and any error encountered -func getResponseCode(r io.Reader) (int, []byte, error) { +func getResponse(r io.Reader) (*http.Response, []byte, error) { rawResponse := bytes.NewBuffer(make([]byte, 0, 256)) // Save the bytes read while reading the response headers into the rawResponse buffer resp, err := http.ReadResponse(bufio.NewReader(io.TeeReader(r, rawResponse)), nil) if err != nil { - return 0, nil, err + return nil, nil, err } - // return the http status code and the raw bytes consumed from the reader in the process - return resp.StatusCode, rawResponse.Bytes(), nil + // return the http response and the raw bytes consumed from the reader in the process + return resp, rawResponse.Bytes(), nil } // dial dials the backend at req.URL and writes req to it. diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go index 9421edae866ad..82a473bb146f8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go +++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go @@ -27,7 +27,14 @@ var rng = struct { sync.Mutex rand *rand.Rand }{ - rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())), + rand: rand.New(rand.NewSource(time.Now().UnixNano())), +} + +// Int returns a non-negative pseudo-random int. +func Int() int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Int() } // Intn generates an integer in range [0,max). 
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel index c6eb67e279988..dd2af1e9a8fe0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel @@ -6,5 +6,5 @@ go_library( importmap = "k8s.io/kops/vendor/k8s.io/apimachinery/pkg/util/runtime", importpath = "k8s.io/apimachinery/pkg/util/runtime", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index da32fe12f33b8..8e34f92613979 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -63,7 +63,11 @@ func HandleCrash(additionalHandlers ...func(interface{})) { // logPanic logs the caller tree when a panic occurs. func logPanic(r interface{}) { callers := getCallers(r) - glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) + if _, ok := r.(string); ok { + klog.Errorf("Observed a panic: %s\n%v", r, callers) + } else { + klog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) + } } func getCallers(r interface{}) string { @@ -111,7 +115,7 @@ func HandleError(err error) { // logError prints an error with the call stack of the location it was reported func logError(err error) { - glog.ErrorDepth(2, err) + klog.ErrorDepth(2, err) } type rudimentaryErrorBackoff struct { diff --git a/vendor/k8s.io/kubernetes/pkg/util/version/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/version/BUILD.bazel similarity index 60% rename from vendor/k8s.io/kubernetes/pkg/util/version/BUILD.bazel rename to vendor/k8s.io/apimachinery/pkg/util/version/BUILD.bazel index 0e01e65b4834e..178c5b3d07b67 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/version/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/util/version/BUILD.bazel @@ -6,7 +6,7 @@ go_library( "doc.go", "version.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/util/version", - importpath = "k8s.io/kubernetes/pkg/util/version", + importmap = "k8s.io/kops/vendor/k8s.io/apimachinery/pkg/util/version", + importpath = "k8s.io/apimachinery/pkg/util/version", visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/version/doc.go b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/util/version/doc.go rename to vendor/k8s.io/apimachinery/pkg/util/version/doc.go index ebe43152e8e76..5b2b22b6d00cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/version/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go @@ -15,4 +15,4 @@ limitations under the License. 
*/ // Package version provides utilities for version number comparisons -package version // import "k8s.io/kubernetes/pkg/util/version" +package version // import "k8s.io/apimachinery/pkg/util/version" diff --git a/vendor/k8s.io/kubernetes/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/util/version/version.go rename to vendor/k8s.io/apimachinery/pkg/util/version/version.go diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel index afea0e796d44e..21c65e63f2fa1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel @@ -7,7 +7,7 @@ go_library( importpath = "k8s.io/apimachinery/pkg/util/yaml", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 3cd85515d433b..63d735a804cf8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -26,8 +26,8 @@ import ( "strings" "unicode" - "github.com/ghodss/yaml" - "github.com/golang/glog" + "k8s.io/klog" + "sigs.k8s.io/yaml" ) // ToJSON converts a single YAML document into a JSON document @@ -217,11 +217,11 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if d.decoder == nil { buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) if isJSON { - glog.V(4).Infof("decoding stream as JSON") + klog.V(4).Infof("decoding stream as JSON") d.decoder = json.NewDecoder(buffer) d.rawData = origData } else { - glog.V(4).Infof("decoding stream as YAML") + klog.V(4).Infof("decoding stream as YAML") d.decoder = NewYAMLToJSONDecoder(buffer) } } @@ -230,7 +230,7 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if syntax, ok := err.(*json.SyntaxError); ok { data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) if readErr != nil { - glog.V(4).Infof("reading stream failed: %v", readErr) + klog.V(4).Infof("reading stream failed: %v", readErr) } js := string(data) diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go index 5e77af7ea9a25..29574fd6d5894 100644 --- a/vendor/k8s.io/apimachinery/pkg/version/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package version supplies the type for version information collected at build time. // +k8s:openapi-gen=true + +// Package version supplies the type for version information collected at build time. 
package version // import "k8s.io/apimachinery/pkg/version" diff --git a/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel index 6f2629063506d..0fe3bb93c8d80 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel +++ b/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel @@ -14,10 +14,10 @@ go_library( importpath = "k8s.io/apimachinery/pkg/watch", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 93bb1cdf7f6bb..d61cf5a2e58b6 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -20,10 +20,10 @@ import ( "io" "sync" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" ) // Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. @@ -100,13 +100,13 @@ func (sw *StreamWatcher) receive() { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) default: msg := "Unable to decode an event from the watch stream: %v" if net.IsProbableEOF(err) { - glog.V(5).Infof(msg, err) + klog.V(5).Infof(msg, err) } else { - glog.Errorf(msg, err) + klog.Errorf(msg, err) } } return diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index a627d1d572c3d..be9c90c03d108 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" ) @@ -106,7 +106,7 @@ func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - glog.V(4).Infof("Stopping fake watcher.") + klog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } @@ -173,7 +173,7 @@ func (f *RaceFreeFakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - glog.V(4).Infof("Stopping fake watcher.") + klog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } diff --git a/vendor/k8s.io/apiserver/pkg/admission/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/BUILD.bazel index f61136dd5509d..caeec080063df 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/admission/BUILD.bazel @@ -17,8 +17,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/admission", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -32,5 +30,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/audit:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/admission/config.go b/vendor/k8s.io/apiserver/pkg/admission/config.go index f59d0608bcedf..ffda2f3262c88 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/config.go +++ b/vendor/k8s.io/apiserver/pkg/admission/config.go @@ -25,8 +25,8 @@ import ( "path" "path/filepath" - "github.com/ghodss/yaml" - "github.com/golang/glog" + "k8s.io/klog" + "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -146,7 +146,7 @@ func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfi if pluginCfg.Path != "" { content, err := ioutil.ReadFile(pluginCfg.Path) if err != nil { - glog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) + klog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) return nil, err } return bytes.NewBuffer(content), nil diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD.bazel index 20af72bb8ff94..e27b63ba99501 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD.bazel @@ -12,7 +12,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/admission/configuration", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -25,5 +24,6 @@ go_library( "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/listers/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go index 986524b5ba2e2..f2b7e909942e1 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go @@ -21,7 +21,7 @@ import ( "reflect" "sort" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/admissionregistration/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" @@ -42,7 +42,7 @@ func NewInitializerConfigurationManager(c InitializerConfigurationLister) *Initi list, err := c.List(metav1.ListOptions{}) if err != nil { if errors.IsNotFound(err) || errors.IsForbidden(err) { - glog.V(5).Infof("Initializers are disabled due to an error: %v", err) + klog.V(5).Infof("Initializers are disabled due to an error: %v", err) return nil, ErrDisabled } return nil, err diff --git a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go index 0955a98c9b88a..a5ab97a74d52b 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go @@ -112,17 +112,17 @@ func newAdmissionMetrics() *AdmissionMetrics { // Admission metrics for a step of the admission flow. 
The entire admission flow is broken down into a series of steps // Each step is identified by a distinct type label value. step := newMetricSet("step", - []string{"type", "operation", "group", "version", "resource", "subresource", "rejected"}, + []string{"type", "operation", "rejected"}, "Admission sub-step %s, broken out for each operation and API resource and step type (validate or admit).", true) // Built-in admission controller metrics. Each admission controller is identified by name. controller := newMetricSet("controller", - []string{"name", "type", "operation", "group", "version", "resource", "subresource", "rejected"}, + []string{"name", "type", "operation", "rejected"}, "Admission controller %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false) // Admission webhook metrics. Each webhook is identified by name. webhook := newMetricSet("webhook", - []string{"name", "type", "operation", "group", "version", "resource", "subresource", "rejected"}, + []string{"name", "type", "operation", "rejected"}, "Admission webhook %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false) step.mustRegister() @@ -139,20 +139,17 @@ func (m *AdmissionMetrics) reset() { // ObserveAdmissionStep records admission related metrics for a admission step, identified by step type. func (m *AdmissionMetrics) ObserveAdmissionStep(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { - gvr := attr.GetResource() - m.step.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...) + m.step.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) } // ObserveAdmissionController records admission related metrics for a built-in admission controller, identified by it's plugin handler name. func (m *AdmissionMetrics) ObserveAdmissionController(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { - gvr := attr.GetResource() - m.controller.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...) + m.controller.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) } // ObserveWebhook records admission related metrics for a admission webhook. func (m *AdmissionMetrics) ObserveWebhook(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { - gvr := attr.GetResource() - m.webhook.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...) + m.webhook.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) 
} type metricSet struct { diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD.bazel index 852ccf4d641d7..1e0eecfb8837d 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD.bazel @@ -7,7 +7,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/admission/plugin/initialization", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -23,5 +22,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/features:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go index 1bb59da5d5398..d4d184a57478a 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go @@ -21,7 +21,7 @@ import ( "io" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/admissionregistration/v1alpha1" "k8s.io/api/core/v1" @@ -56,6 +56,7 @@ type initializerOptions struct { Initializers []string } +// InitializationConfig specifies initialization config type InitializationConfig interface { Run(stopCh <-chan struct{}) Initializers() (*v1alpha1.InitializerConfiguration, error) @@ -85,9 +86,9 @@ func (i *initializer) ValidateInitialization() error { if !utilfeature.DefaultFeatureGate.Enabled(features.Initializers) { if err := utilfeature.DefaultFeatureGate.Set(string(features.Initializers) + "=true"); err != nil { - glog.Errorf("error enabling Initializers feature as part of admission plugin setup: %v", err) + klog.Errorf("error enabling Initializers feature as part of admission plugin setup: %v", err) } else { - glog.Infof("enabled Initializers feature as part of admission plugin setup") + klog.Infof("enabled Initializers feature as part of admission plugin setup") } } @@ -169,7 +170,7 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { } existing := accessor.GetInitializers() if existing != nil { - glog.V(5).Infof("Admin bypassing initialization for %s", a.GetResource()) + klog.V(5).Infof("Admin bypassing initialization for %s", a.GetResource()) // it must be possible for some users to bypass initialization - for now, check the initialize operation if err := i.canInitialize(a, "create with initializers denied"); err != nil { @@ -181,7 +182,7 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { return nil } } else { - glog.V(5).Infof("Checking initialization for %s", a.GetResource()) + klog.V(5).Infof("Checking initialization for %s", a.GetResource()) config, err := i.readConfig(a) if err != nil { @@ -204,11 +205,11 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { names := findInitializers(config, a.GetResource()) if len(names) == 0 { - glog.V(5).Infof("No initializers needed") + klog.V(5).Infof("No initializers needed") return nil } - glog.V(5).Infof("Found initializers for %s: %v", a.GetResource(), 
names) + klog.V(5).Infof("Found initializers for %s: %v", a.GetResource(), names) accessor.SetInitializers(newInitializers(names)) } @@ -240,7 +241,7 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { return nil } - glog.V(5).Infof("Modifying uninitialized resource %s", a.GetResource()) + klog.V(5).Infof("Modifying uninitialized resource %s", a.GetResource()) // because we are called before validation, we need to ensure the update transition is valid. if errs := validation.ValidateInitializersUpdate(updated, existing, initializerFieldPath); len(errs) > 0 { diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD.bazel index 306f6f473fb0a..6d3e936ac9195 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD.bazel @@ -7,7 +7,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -20,5 +19,6 @@ go_library( "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index 81c24f6a5a683..d7bb0215b981e 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -21,7 +21,7 @@ import ( "io" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -38,7 +38,7 @@ import ( ) const ( - // Name of admission plug-in + // PluginName indicates the name of admission plug-in PluginName = "NamespaceLifecycle" // how long a namespace stays in the force live lookup cache before expiration. 
forceLiveLookupTTL = 30 * time.Second @@ -72,6 +72,7 @@ type Lifecycle struct { var _ = initializer.WantsExternalKubeInformerFactory(&Lifecycle{}) var _ = initializer.WantsExternalKubeClientSet(&Lifecycle{}) +// Admit makes an admission decision based on the request attributes func (l *Lifecycle) Admit(a admission.Attributes) error { // prevent deletion of immortal namespaces if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() && l.immortalNamespaces.Has(a.GetName()) { @@ -138,7 +139,7 @@ func (l *Lifecycle) Admit(a admission.Attributes) error { exists = true } if exists { - glog.V(4).Infof("found %s in cache after waiting", a.GetNamespace()) + klog.V(4).Infof("found %s in cache after waiting", a.GetNamespace()) } } @@ -159,7 +160,7 @@ func (l *Lifecycle) Admit(a admission.Attributes) error { case err != nil: return errors.NewInternalError(err) } - glog.V(4).Infof("found %s via storage lookup", a.GetNamespace()) + klog.V(4).Infof("found %s via storage lookup", a.GetNamespace()) } // ensure that we're not trying to create objects in terminating namespaces diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go index 04c376f779509..703f467f9fcc7 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission // +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. -// +groupName=apiserver.config.k8s.io package v1alpha1 diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go index a75c63fa9feec..050c31730fe20 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go @@ -43,6 +43,8 @@ func (c *convertor) ConvertToGVK(obj runtime.Object, gvk schema.GroupVersionKind if err != nil { return nil, err } + // Explicitly set the GVK + out.GetObjectKind().SetGroupVersionKind(gvk) return out, nil } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go index 408187fd1f7a9..13b898bca9b95 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go @@ -164,6 +164,7 @@ func (a *Webhook) Dispatch(attr admission.Attributes) error { return admission.NewForbidden(attr, fmt.Errorf("not yet ready to handle request")) } hooks := a.hookSource.Webhooks() + // TODO: Figure out if adding one second timeout make sense here. 
ctx := context.TODO() var relevantHooks []*v1beta1.Webhook diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD.bazel index 4e15ddc5a04d1..4c9653cce1117 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD.bazel @@ -12,7 +12,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/admission/v1beta1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -28,5 +27,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/util:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/webhook:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go index 4f95a6adf9ada..d646bacb535d1 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go @@ -24,7 +24,7 @@ import ( "time" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" + "k8s.io/klog" admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/api/admissionregistration/v1beta1" @@ -65,11 +65,11 @@ func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr *generic.Version ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1beta1.Ignore if callErr, ok := err.(*webhook.ErrCallingWebhook); ok { if ignoreClientCallFailures { - glog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) + klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) utilruntime.HandleError(callErr) continue } - glog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) + klog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) } return apierrors.NewInternalError(err) } @@ -110,7 +110,7 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *v1beta for k, v := range response.Response.AuditAnnotations { key := h.Name + "/" + k if err := attr.AddAnnotation(key, v); err != nil { - glog.Warningf("Failed to set admission audit annotation %s to %s for mutating webhook %s: %v", key, v, h.Name, err) + klog.Warningf("Failed to set admission audit annotation %s to %s for mutating webhook %s: %v", key, v, h.Name, err) } } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go index f03b1b3422aa5..33572b24b5b4a 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/plugin.go @@ -28,7 +28,7 @@ import ( ) const ( - // Name of admission plug-in + // PluginName indicates the name of admission plug-in PluginName = "MutatingAdmissionWebhook" ) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD.bazel 
b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD.bazel index ef89ffa31980f..a4876d8dc13e0 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD.bazel @@ -11,7 +11,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/validating", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/admission/v1beta1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -24,5 +23,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/util:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/webhook:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go index 42e4262d090c2..166e21adcdf8f 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/api/admissionregistration/v1beta1" @@ -64,17 +64,17 @@ func (d *validatingDispatcher) Dispatch(ctx context.Context, attr *generic.Versi ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1beta1.Ignore if callErr, ok := err.(*webhook.ErrCallingWebhook); ok { if ignoreClientCallFailures { - glog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) + klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) utilruntime.HandleError(callErr) return } - glog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) + klog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) errCh <- apierrors.NewInternalError(err) return } - glog.Warningf("rejected by webhook %q: %#v", hook.Name, err) + klog.Warningf("rejected by webhook %q: %#v", hook.Name, err) errCh <- err }(relevantHooks[i]) } @@ -124,7 +124,7 @@ func (d *validatingDispatcher) callHook(ctx context.Context, h *v1beta1.Webhook, for k, v := range response.Response.AuditAnnotations { key := h.Name + "/" + k if err := attr.AddAnnotation(key, v); err != nil { - glog.Warningf("Failed to set admission audit annotation %s to %s for validating webhook %s: %v", key, v, h.Name, err) + klog.Warningf("Failed to set admission audit annotation %s to %s for validating webhook %s: %v", key, v, h.Name, err) } } if response.Response.Allowed { diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go index 8417ccffb1797..7f79b9d7a0820 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/plugin.go @@ -25,7 +25,7 @@ import ( ) const ( - // Name of admission plug-in + // PluginName indicates the name of admission plug-in PluginName = "ValidatingAdmissionWebhook" ) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugins.go 
b/vendor/k8s.io/apiserver/pkg/admission/plugins.go index c17d62cd4e67b..bdf087e564f4b 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugins.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugins.go @@ -26,7 +26,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // Factory is a function that returns an Interface for admission decisions. @@ -75,13 +75,13 @@ func (ps *Plugins) Register(name string, plugin Factory) { if ps.registry != nil { _, found := ps.registry[name] if found { - glog.Fatalf("Admission plugin %q was registered twice", name) + klog.Fatalf("Admission plugin %q was registered twice", name) } } else { ps.registry = map[string]Factory{} } - glog.V(1).Infof("Registered admission plugin %q", name) + klog.V(1).Infof("Registered admission plugin %q", name) ps.registry[name] = plugin } @@ -155,10 +155,10 @@ func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigPro } } if len(mutationPlugins) != 0 { - glog.Infof("Loaded %d mutating admission controller(s) successfully in the following order: %s.", len(mutationPlugins), strings.Join(mutationPlugins, ",")) + klog.Infof("Loaded %d mutating admission controller(s) successfully in the following order: %s.", len(mutationPlugins), strings.Join(mutationPlugins, ",")) } if len(validationPlugins) != 0 { - glog.Infof("Loaded %d validating admission controller(s) successfully in the following order: %s.", len(validationPlugins), strings.Join(validationPlugins, ",")) + klog.Infof("Loaded %d validating admission controller(s) successfully in the following order: %s.", len(validationPlugins), strings.Join(validationPlugins, ",")) } return chainAdmissionHandler(handlers), nil } @@ -166,7 +166,7 @@ func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigPro // InitPlugin creates an instance of the named interface. func (ps *Plugins) InitPlugin(name string, config io.Reader, pluginInitializer PluginInitializer) (Interface, error) { if name == "" { - glog.Info("No admission plugin specified.") + klog.Info("No admission plugin specified.") return nil, nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go index a89863a35fc5f..88db1ffa67af4 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=apiserver.k8s.io // Package apiserver is the internal version of the API. -// +groupName=apiserver.k8s.io package apiserver // import "k8s.io/apiserver/pkg/apis/apiserver" diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go index 7dd031a793cb1..82ebd0c455d13 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver // +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. 
-// +groupName=apiserver.k8s.io package v1alpha1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS b/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS new file mode 100644 index 0000000000000..8389778900fe4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS @@ -0,0 +1,7 @@ +# approval on api packages bubbles to api-approvers +reviewers: +- sig-auth-audit-approvers +- sig-auth-audit-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go index 34bc671e8d148..deda9cbd63c83 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=audit.k8s.io + package audit // import "k8s.io/apiserver/pkg/apis/audit" diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go index 9d93625481775..b8f818ffdb207 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=audit.k8s.io + package v1 // import "k8s.io/apiserver/pkg/apis/audit/v1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go index 756ea30baa98a..7d4ce7573df56 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
@@ -610,24 +609,6 @@ func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1455,51 +1436,14 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1509,41 +1453,80 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) 
+ if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 16: if wireType != 2 { diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto index 4baad752efe51..c7242222c9461 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -121,17 +121,17 @@ message GroupResources { optional string group = 1; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' matches pods. // 'pods/log' matches the log subresource of pods. // '*' matches all resources and their subresources. // 'pods/*' matches all subresources of pods. // '*/scale' matches all scale subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // An empty list implies all resources and subresources in this API groups apply. // +optional repeated string resources = 2; diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go index 27cc4c5ea52bb..d2cbdd9919bff 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=audit.k8s.io + package v1alpha1 // import "k8s.io/apiserver/pkg/apis/audit/v1alpha1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go index 73a993c25c3b4..6ef668a9d964f 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
@@ -626,24 +625,6 @@ func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1534,51 +1515,14 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1588,41 +1532,80 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) 
+ if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 18: if wireType != 2 { diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto index 507f5889b73e6..2a0773d1967cb 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto @@ -128,17 +128,17 @@ message GroupResources { optional string group = 1; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' matches pods. // 'pods/log' matches the log subresource of pods. // '*' matches all resources and their subresources. // 'pods/*' matches all subresources of pods. // '*/scale' matches all scale subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // An empty list implies all resources and subresources in this API groups apply. // +optional repeated string resources = 2; diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go index 3881472586767..d43a807c3de54 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=audit.k8s.io + package v1beta1 // import "k8s.io/apiserver/pkg/apis/audit/v1beta1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go index 53d25d9c36f06..ecc26c2e73570 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -630,24 +629,6 @@ func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1541,51 +1522,14 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1595,41 +1539,80 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) 
+ if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 18: if wireType != 2 { diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto index 2ea4c6a605926..23ed8910aa845 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto @@ -131,17 +131,17 @@ message GroupResources { optional string group = 1; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' matches pods. // 'pods/log' matches the log subresource of pods. // '*' matches all resources and their subresources. // 'pods/*' matches all subresources of pods. // '*/scale' matches all scale subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // An empty list implies all resources and subresources in this API groups apply. // +optional repeated string resources = 2; diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go index f80aba01ffec4..397317f23b2f3 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go @@ -24,6 +24,7 @@ import ( "k8s.io/apiserver/pkg/apis/audit" ) +// ValidatePolicy validates the audit policy func ValidatePolicy(policy *audit.Policy) field.ErrorList { var allErrs field.ErrorList allErrs = append(allErrs, validateOmitStages(policy.OmitStages, field.NewPath("omitStages"))...) 
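Most of the non-generated churn in this vendored update is a single mechanical migration: k8s.io/apiserver drops github.com/golang/glog in favor of k8s.io/klog, swapping the import path and the package qualifier at each call site while the Bazel deps lists are updated to match. The following is a minimal sketch of what a migrated call site looks like; the package and helper function are hypothetical and exist only for illustration, since klog's V/Infof/Warningf calls are signature-compatible with glog's.

package example // hypothetical package, not part of the vendored tree

import (
	"k8s.io/klog" // replaces "github.com/golang/glog"
)

// logCacheHit mirrors the rewritten call sites in the hunks above: only the
// import path and the package qualifier change; the logging API is unchanged.
func logCacheHit(namespace string) {
	klog.V(4).Infof("found %s in cache after waiting", namespace)
	// Failure paths get the same one-for-one swap, e.g.:
	// klog.Warningf("Failed calling webhook, failing open %v: %v", name, err)
}
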
diff --git a/vendor/k8s.io/apiserver/pkg/audit/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/audit/BUILD.bazel index 437712a29e663..a000ee2e142ef 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/audit/BUILD.bazel @@ -14,7 +14,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/audit", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -31,5 +30,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/audit/OWNERS b/vendor/k8s.io/apiserver/pkg/audit/OWNERS new file mode 100644 index 0000000000000..178ce84a5ce3e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-audit-approvers +reviewers: +- sig-auth-audit-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/apiserver/pkg/audit/event/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/audit/event/BUILD.bazel new file mode 100644 index 0000000000000..dddfceb58b014 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/event/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["attributes.go"], + importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/audit/event", + importpath = "k8s.io/apiserver/pkg/audit/event", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + ], +) diff --git a/vendor/k8s.io/apiserver/pkg/audit/event/attributes.go b/vendor/k8s.io/apiserver/pkg/audit/event/attributes.go new file mode 100644 index 0000000000000..576b8db84824d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/event/attributes.go @@ -0,0 +1,147 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package event + +import ( + "fmt" + "net/url" + + "k8s.io/apiserver/pkg/apis/audit" + authuser "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +var _ authorizer.Attributes = &attributes{} + +// attributes implements the authorizer attributes interface +// with event data. 
This is used for enforced audit backends +type attributes struct { + event *audit.Event + path string +} + +// NewAttributes returns a new attributes struct and parsed request uri +// if needed +func NewAttributes(event *audit.Event) (authorizer.Attributes, error) { + a := attributes{ + event: event, + } + if event.ObjectRef == nil { + u, err := url.ParseRequestURI(a.event.RequestURI) + if err != nil { + return nil, fmt.Errorf("could not parse url: %v", err) + } + a.path = u.Path + } + return &a, nil +} + +// GetUser returns the user. This is only used for checking audit policy, +// and the audit policy user check is based off the original user, +// not the impersonated user. +func (a *attributes) GetUser() authuser.Info { + return user(a.event.User) +} + +// GetVerb returns the verb +func (a *attributes) GetVerb() string { + return a.event.Verb +} + +// IsReadOnly determines if the verb is a read only action +func (a *attributes) IsReadOnly() bool { + return a.event.Verb == "get" || a.event.Verb == "list" || a.event.Verb == "watch" +} + +// GetNamespace returns the object namespace if present +func (a *attributes) GetNamespace() string { + if a.event.ObjectRef == nil { + return "" + } + return a.event.ObjectRef.Namespace +} + +// GetResource returns the object resource if present +func (a *attributes) GetResource() string { + if a.event.ObjectRef == nil { + return "" + } + return a.event.ObjectRef.Resource +} + +// GetSubresource returns the object subresource if present +func (a *attributes) GetSubresource() string { + if a.event.ObjectRef == nil { + return "" + } + return a.event.ObjectRef.Subresource +} + +// GetName returns the object name if present +func (a *attributes) GetName() string { + if a.event.ObjectRef == nil { + return "" + } + return a.event.ObjectRef.Name +} + +// GetAPIGroup returns the object api group if present +func (a *attributes) GetAPIGroup() string { + if a.event.ObjectRef == nil { + return "" + } + return a.event.ObjectRef.APIGroup +} + +// GetAPIVersion returns the object api version if present +func (a *attributes) GetAPIVersion() string { + if a.event.ObjectRef == nil { + return "" + } + return a.event.ObjectRef.APIVersion +} + +// IsResourceRequest determines if the request was acted on a resource +func (a *attributes) IsResourceRequest() bool { + return a.event.ObjectRef != nil +} + +// GetPath returns the path uri accessed +func (a *attributes) GetPath() string { + return a.path +} + +// user represents the event user +type user audit.UserInfo + +// GetName returns the user name +func (u user) GetName() string { return u.Username } + +// GetUID returns the user uid +func (u user) GetUID() string { return u.UID } + +// GetGroups returns the user groups +func (u user) GetGroups() []string { return u.Groups } + +// GetExtra returns the user extra data +func (u user) GetExtra() map[string][]string { + m := map[string][]string{} + for k, v := range u.Extra { + m[k] = []string(v) + } + return m +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/metrics.go b/vendor/k8s.io/apiserver/pkg/audit/metrics.go index 10280e0d88e91..9b81b30cc265a 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/audit/metrics.go @@ -19,9 +19,9 @@ package audit import ( "fmt" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/klog" ) const ( @@ -52,12 +52,22 @@ var ( }, []string{"level"}, ) + + ApiserverAuditDroppedCounter = prometheus.NewCounter( + 
prometheus.CounterOpts{ + Subsystem: subsystem, + Name: "requests_rejected_total", + Help: "Counter of apiserver requests rejected due to an error " + + "in audit logging backend.", + }, + ) ) func init() { prometheus.MustRegister(eventCounter) prometheus.MustRegister(errorCounter) prometheus.MustRegister(levelCounter) + prometheus.MustRegister(ApiserverAuditDroppedCounter) } // ObserveEvent updates the relevant prometheus metrics for the generated audit event. @@ -83,5 +93,5 @@ func HandlePluginError(plugin string, err error, impacted ...*auditinternal.Even for _, ev := range impacted { msg = msg + EventString(ev) + "\n" } - glog.Error(msg) + klog.Error(msg) } diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD.bazel index 61d71d4653658..2a6069f61bb1f 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD.bazel @@ -4,14 +4,18 @@ go_library( name = "go_default_library", srcs = [ "checker.go", + "dynamic.go", + "enforce.go", "reader.go", + "util.go", ], importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/audit/policy", importpath = "k8s.io/apiserver/pkg/audit/policy", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/auditregistration/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1:go_default_library", @@ -19,5 +23,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/apis/audit/validation:go_default_library", "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/dynamic.go b/vendor/k8s.io/apiserver/pkg/audit/policy/dynamic.go new file mode 100644 index 0000000000000..4b5f29a1132b7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/dynamic.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package policy + +import ( + "k8s.io/api/auditregistration/v1alpha1" + "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +// ConvertDynamicPolicyToInternal constructs an internal policy type from a +// v1alpha1 dynamic type +func ConvertDynamicPolicyToInternal(p *v1alpha1.Policy) *audit.Policy { + stages := make([]audit.Stage, len(p.Stages)) + for i, stage := range p.Stages { + stages[i] = audit.Stage(stage) + } + return &audit.Policy{ + Rules: []audit.PolicyRule{ + { + Level: audit.Level(p.Level), + }, + }, + OmitStages: InvertStages(stages), + } +} + +// NewDynamicChecker returns a new dynamic policy checker +func NewDynamicChecker() Checker { + return &dynamicPolicyChecker{} +} + +type dynamicPolicyChecker struct{} + +// LevelAndStages returns returns a fixed level of the full event, this is so that the downstream policy +// can be applied per sink. +// TODO: this needs benchmarking before the API moves to beta to determine the effect this has on the apiserver +func (d *dynamicPolicyChecker) LevelAndStages(authorizer.Attributes) (audit.Level, []audit.Stage) { + return audit.LevelRequestResponse, []audit.Stage{} +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go b/vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go new file mode 100644 index 0000000000000..e2b107b9f1dac --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/enforce.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package policy + +import ( + "fmt" + + "k8s.io/apiserver/pkg/apis/audit" +) + +// EnforcePolicy drops any part of the event that doesn't conform to a policy level +// or omitStages and sets the event level accordingly +func EnforcePolicy(event *audit.Event, level audit.Level, omitStages []audit.Stage) (*audit.Event, error) { + for _, stage := range omitStages { + if event.Stage == stage { + return nil, nil + } + } + return enforceLevel(event, level) +} + +func enforceLevel(event *audit.Event, level audit.Level) (*audit.Event, error) { + switch level { + case audit.LevelMetadata: + event.Level = audit.LevelMetadata + event.ResponseObject = nil + event.RequestObject = nil + case audit.LevelRequest: + event.Level = audit.LevelRequest + event.ResponseObject = nil + case audit.LevelRequestResponse: + event.Level = audit.LevelRequestResponse + case audit.LevelNone: + return nil, nil + default: + return nil, fmt.Errorf("level unknown: %s", level) + } + return event, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go b/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go index d582cda88d3d9..3d669fe699dc1 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go @@ -28,7 +28,7 @@ import ( "k8s.io/apiserver/pkg/apis/audit/validation" "k8s.io/apiserver/pkg/audit" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -55,17 +55,26 @@ func LoadPolicyFromFile(filePath string) (*auditinternal.Policy, error) { return nil, fmt.Errorf("failed to read file path %q: %+v", filePath, err) } + ret, err := LoadPolicyFromBytes(policyDef) + if err != nil { + return nil, fmt.Errorf("%v: from file %v", err.Error(), filePath) + } + + return ret, nil +} + +func LoadPolicyFromBytes(policyDef []byte) (*auditinternal.Policy, error) { policy := &auditinternal.Policy{} decoder := audit.Codecs.UniversalDecoder(apiGroupVersions...) _, gvk, err := decoder.Decode(policyDef, nil, policy) if err != nil { - return nil, fmt.Errorf("failed decoding file %q: %v", filePath, err) + return nil, fmt.Errorf("failed decoding: %v", err) } // Ensure the policy file contained an apiVersion and kind. if !apiGroupVersionSet[schema.GroupVersion{Group: gvk.Group, Version: gvk.Version}] { - return nil, fmt.Errorf("unknown group version field %v in policy file %s", gvk, filePath) + return nil, fmt.Errorf("unknown group version field %v in policy", gvk) } if err := validation.ValidatePolicy(policy); err != nil { @@ -74,8 +83,8 @@ func LoadPolicyFromFile(filePath string) (*auditinternal.Policy, error) { policyCnt := len(policy.Rules) if policyCnt == 0 { - return nil, fmt.Errorf("loaded illegal policy with 0 rules from file %s", filePath) + return nil, fmt.Errorf("loaded illegal policy with 0 rules") } - glog.V(4).Infof("Loaded %d audit policy rules from file %s", policyCnt, filePath) + klog.V(4).Infof("Loaded %d audit policy rules", policyCnt) return policy, nil } diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/util.go b/vendor/k8s.io/apiserver/pkg/audit/policy/util.go new file mode 100644 index 0000000000000..29be912303c0c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/apis/audit" +) + +// AllStages returns all possible stages +func AllStages() sets.String { + return sets.NewString( + audit.StageRequestReceived, + audit.StageResponseStarted, + audit.StageResponseComplete, + audit.StagePanic, + ) +} + +// AllLevels returns all possible levels +func AllLevels() sets.String { + return sets.NewString( + string(audit.LevelNone), + string(audit.LevelMetadata), + string(audit.LevelRequest), + string(audit.LevelRequestResponse), + ) +} + +// InvertStages subtracts the given array of stages from all stages +func InvertStages(stages []audit.Stage) []audit.Stage { + s := ConvertStagesToStrings(stages) + a := AllStages() + a.Delete(s...) + return ConvertStringSetToStages(a) +} + +// ConvertStagesToStrings converts an array of stages to a string array +func ConvertStagesToStrings(stages []audit.Stage) []string { + s := make([]string, len(stages)) + for i, stage := range stages { + s[i] = string(stage) + } + return s +} + +// ConvertStringSetToStages converts a string set to an array of stages +func ConvertStringSetToStages(set sets.String) []audit.Stage { + stages := make([]audit.Stage, len(set)) + for i, stage := range set.List() { + stages[i] = audit.Stage(stage) + } + return stages +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/request.go b/vendor/k8s.io/apiserver/pkg/audit/request.go index 9593b6c8ab4d1..d4b12770eab4e 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/request.go +++ b/vendor/k8s.io/apiserver/pkg/audit/request.go @@ -22,8 +22,8 @@ import ( "net/http" "time" - "github.com/golang/glog" "github.com/pborman/uuid" + "k8s.io/klog" "reflect" @@ -45,10 +45,10 @@ const ( func NewEventFromRequest(req *http.Request, level auditinternal.Level, attribs authorizer.Attributes) (*auditinternal.Event, error) { ev := &auditinternal.Event{ RequestReceivedTimestamp: metav1.NewMicroTime(time.Now()), - Verb: attribs.GetVerb(), - RequestURI: req.URL.RequestURI(), - UserAgent: maybeTruncateUserAgent(req), - Level: level, + Verb: attribs.GetVerb(), + RequestURI: req.URL.RequestURI(), + UserAgent: maybeTruncateUserAgent(req), + Level: level, } // prefer the id from the headers. If not available, create a new one. 
@@ -152,7 +152,7 @@ func LogRequestObject(ae *auditinternal.Event, obj runtime.Object, gvr schema.Gr ae.RequestObject, err = encodeObject(obj, gvr.GroupVersion(), s) if err != nil { // TODO(audit): add error slice to audit event struct - glog.Warningf("Auditing failed of %v request: %v", reflect.TypeOf(obj).Name(), err) + klog.Warningf("Auditing failed of %v request: %v", reflect.TypeOf(obj).Name(), err) return } } @@ -191,7 +191,7 @@ func LogResponseObject(ae *auditinternal.Event, obj runtime.Object, gv schema.Gr var err error ae.ResponseObject, err = encodeObject(obj, gv, s) if err != nil { - glog.Warningf("Audit failed for %q response: %v", reflect.TypeOf(obj).Name(), err) + klog.Warningf("Audit failed for %q response: %v", reflect.TypeOf(obj).Name(), err) } } @@ -223,7 +223,7 @@ func LogAnnotation(ae *auditinternal.Event, key, value string) { ae.Annotations = make(map[string]string) } if v, ok := ae.Annotations[key]; ok && v != value { - glog.Warningf("Failed to set annotations[%q] to %q for audit:%q, it has already been set to %q", key, value, ae.AuditID, ae.Annotations[key]) + klog.Warningf("Failed to set annotations[%q] to %q for audit:%q, it has already been set to %q", key, value, ae.AuditID, ae.Annotations[key]) return } ae.Annotations[key] = value diff --git a/vendor/k8s.io/apiserver/pkg/audit/types.go b/vendor/k8s.io/apiserver/pkg/audit/types.go index dbf03b0f51c24..b78bd086b0516 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/types.go +++ b/vendor/k8s.io/apiserver/pkg/audit/types.go @@ -25,7 +25,8 @@ type Sink interface { // Errors might be logged by the sink itself. If an error should be fatal, leading to an internal // error, ProcessEvents is supposed to panic. The event must not be mutated and is reused by the caller // after the call returns, i.e. the sink has to make a deepcopy to keep a copy around if necessary. - ProcessEvents(events ...*auditinternal.Event) + // Returns true on success, may return false on error. + ProcessEvents(events ...*auditinternal.Event) bool } type Backend interface { diff --git a/vendor/k8s.io/apiserver/pkg/audit/union.go b/vendor/k8s.io/apiserver/pkg/audit/union.go index 6ee441533a742..39dd74f740e8c 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/union.go +++ b/vendor/k8s.io/apiserver/pkg/audit/union.go @@ -37,10 +37,12 @@ type union struct { backends []Backend } -func (u union) ProcessEvents(events ...*auditinternal.Event) { +func (u union) ProcessEvents(events ...*auditinternal.Event) bool { + success := true for _, backend := range u.backends { - backend.ProcessEvents(events...) + success = backend.ProcessEvents(events...) 
&& success } + return success } func (u union) Run(stopCh <-chan struct{}) error { diff --git a/vendor/k8s.io/apiserver/pkg/audit/util/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/audit/util/BUILD.bazel new file mode 100644 index 0000000000000..f8835c5619345 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/util/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["conversion.go"], + importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/audit/util", + importpath = "k8s.io/apiserver/pkg/audit/util", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/webhook:go_default_library", + ], +) diff --git a/vendor/k8s.io/apiserver/pkg/audit/util/conversion.go b/vendor/k8s.io/apiserver/pkg/audit/util/conversion.go new file mode 100644 index 0000000000000..6b1f35c43957c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/util/conversion.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/api/auditregistration/v1alpha1" + "k8s.io/apiserver/pkg/util/webhook" +) + +// HookClientConfigForSink constructs a webhook.ClientConfig using a v1alpha1.AuditSink API object. +// webhook.ClientConfig is used to create a HookClient and the purpose of the config struct is to +// share that with other packages that need to create a HookClient. 
+func HookClientConfigForSink(a *v1alpha1.AuditSink) webhook.ClientConfig { + c := a.Spec.Webhook.ClientConfig + ret := webhook.ClientConfig{Name: a.Name, CABundle: c.CABundle} + if c.URL != nil { + ret.URL = *c.URL + } + if c.Service != nil { + ret.Service = &webhook.ClientConfigService{ + Name: c.Service.Name, + Namespace: c.Service.Namespace, + } + if c.Service.Path != nil { + ret.Service.Path = *c.Service.Path + } + } + return ret +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel index 9a71ecb12ca83..1ca8f7e33f921 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel @@ -2,7 +2,11 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["interfaces.go"], + srcs = [ + "audagnostic.go", + "audiences.go", + "interfaces.go", + ], importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/authentication/authenticator", importpath = "k8s.io/apiserver/pkg/authentication/authenticator", visibility = ["//visibility:public"], diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go new file mode 100644 index 0000000000000..bcf7eb4bc9d07 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticator + +import ( + "context" + "fmt" + "net/http" +) + +func authenticate(ctx context.Context, implicitAuds Audiences, authenticate func() (*Response, bool, error)) (*Response, bool, error) { + targetAuds, ok := AudiencesFrom(ctx) + // We can remove this once api audiences is never empty. That will probably + // be N releases after TokenRequest is GA. + if !ok { + return authenticate() + } + auds := implicitAuds.Intersect(targetAuds) + if len(auds) == 0 { + return nil, false, nil + } + resp, ok, err := authenticate() + if err != nil || !ok { + return nil, false, err + } + if len(resp.Audiences) > 0 { + // maybe the authenticator was audience aware after all. + return nil, false, fmt.Errorf("audience agnostic authenticator wrapped an authenticator that returned audiences: %q", resp.Audiences) + } + resp.Audiences = auds + return resp, true, nil +} + +type audAgnosticRequestAuthenticator struct { + implicit Audiences + delegate Request +} + +var _ = Request(&audAgnosticRequestAuthenticator{}) + +func (a *audAgnosticRequestAuthenticator) AuthenticateRequest(req *http.Request) (*Response, bool, error) { + return authenticate(req.Context(), a.implicit, func() (*Response, bool, error) { + return a.delegate.AuthenticateRequest(req) + }) +} + +// WrapAudienceAgnosticRequest wraps an audience agnostic request authenticator +// to restrict its accepted audiences to a set of implicit audiences. 
+func WrapAudienceAgnosticRequest(implicit Audiences, delegate Request) Request { + return &audAgnosticRequestAuthenticator{ + implicit: implicit, + delegate: delegate, + } +} + +type audAgnosticTokenAuthenticator struct { + implicit Audiences + delegate Token +} + +var _ = Token(&audAgnosticTokenAuthenticator{}) + +func (a *audAgnosticTokenAuthenticator) AuthenticateToken(ctx context.Context, tok string) (*Response, bool, error) { + return authenticate(ctx, a.implicit, func() (*Response, bool, error) { + return a.delegate.AuthenticateToken(ctx, tok) + }) +} + +// WrapAudienceAgnosticToken wraps an audience agnostic token authenticator to +// restrict its accepted audiences to a set of implicit audiences. +func WrapAudienceAgnosticToken(implicit Audiences, delegate Token) Token { + return &audAgnosticTokenAuthenticator{ + implicit: implicit, + delegate: delegate, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go new file mode 100644 index 0000000000000..2a3a918896d9b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticator + +import "context" + +// Audiences is a container for the Audiences of a token. +type Audiences []string + +// The key type is unexported to prevent collisions +type key int + +const ( + // audiencesKey is the context key for request audiences. + audiencesKey key = iota +) + +// WithAudiences returns a context that stores a request's expected audiences. +func WithAudiences(ctx context.Context, auds Audiences) context.Context { + return context.WithValue(ctx, audiencesKey, auds) +} + +// AudiencesFrom returns a request's expected audiences stored in the request context. +func AudiencesFrom(ctx context.Context) (Audiences, bool) { + auds, ok := ctx.Value(audiencesKey).(Audiences) + return auds, ok +} + +// Has checks if Audiences contains a specific audiences. +func (a Audiences) Has(taud string) bool { + for _, aud := range a { + if aud == taud { + return true + } + } + return false +} + +// Intersect intersects Audiences with a target Audiences and returns all +// elements in both. +func (a Audiences) Intersect(tauds Audiences) Audiences { + selected := Audiences{} + for _, taud := range tauds { + if a.Has(taud) { + selected = append(selected, taud) + } + } + return selected +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go index fd3d0383e52b3..e3b1b622cbaea 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go @@ -17,52 +17,64 @@ limitations under the License. 
package authenticator import ( + "context" "net/http" "k8s.io/apiserver/pkg/authentication/user" ) -// Token checks a string value against a backing authentication store and returns -// information about the current user and true if successful, false if not successful, -// or an error if the token could not be checked. +// Token checks a string value against a backing authentication store and +// returns a Response or an error if the token could not be checked. type Token interface { - AuthenticateToken(token string) (user.Info, bool, error) + AuthenticateToken(ctx context.Context, token string) (*Response, bool, error) } -// Request attempts to extract authentication information from a request and returns -// information about the current user and true if successful, false if not successful, -// or an error if the request could not be checked. +// Request attempts to extract authentication information from a request and +// returns a Response or an error if the request could not be checked. type Request interface { - AuthenticateRequest(req *http.Request) (user.Info, bool, error) + AuthenticateRequest(req *http.Request) (*Response, bool, error) } -// Password checks a username and password against a backing authentication store and -// returns information about the user and true if successful, false if not successful, -// or an error if the username and password could not be checked +// Password checks a username and password against a backing authentication +// store and returns a Response or an error if the password could not be +// checked. type Password interface { - AuthenticatePassword(user, password string) (user.Info, bool, error) + AuthenticatePassword(ctx context.Context, user, password string) (*Response, bool, error) } // TokenFunc is a function that implements the Token interface. -type TokenFunc func(token string) (user.Info, bool, error) +type TokenFunc func(ctx context.Context, token string) (*Response, bool, error) // AuthenticateToken implements authenticator.Token. -func (f TokenFunc) AuthenticateToken(token string) (user.Info, bool, error) { - return f(token) +func (f TokenFunc) AuthenticateToken(ctx context.Context, token string) (*Response, bool, error) { + return f(ctx, token) } // RequestFunc is a function that implements the Request interface. -type RequestFunc func(req *http.Request) (user.Info, bool, error) +type RequestFunc func(req *http.Request) (*Response, bool, error) // AuthenticateRequest implements authenticator.Request. -func (f RequestFunc) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (f RequestFunc) AuthenticateRequest(req *http.Request) (*Response, bool, error) { return f(req) } // PasswordFunc is a function that implements the Password interface. -type PasswordFunc func(user, password string) (user.Info, bool, error) +type PasswordFunc func(ctx context.Context, user, password string) (*Response, bool, error) // AuthenticatePassword implements authenticator.Password. -func (f PasswordFunc) AuthenticatePassword(user, password string) (user.Info, bool, error) { - return f(user, password) +func (f PasswordFunc) AuthenticatePassword(ctx context.Context, user, password string) (*Response, bool, error) { + return f(ctx, user, password) +} + +// Response is the struct returned by authenticator interfaces upon successful +// authentication. It contains information about whether the authenticator +// authenticated the request, information about the context of the +// authentication, and information about the authenticated user. 
+type Response struct { + // Audiences is the set of audiences the authenticator was able to validate + // the token against. If the authenticator is not audience aware, this field + // will be empty. + Audiences Audiences + // User is the UserInfo associated with the authentication context. + User user.Info } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD.bazel index cafdd7affe387..49ff690a787eb 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD.bazel @@ -20,6 +20,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/authentication/request/union:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/request/websocket:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/request/x509:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authentication/token/cache:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go index d8e18345545eb..67958c3639b85 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go @@ -31,6 +31,7 @@ import ( unionauth "k8s.io/apiserver/pkg/authentication/request/union" "k8s.io/apiserver/pkg/authentication/request/websocket" "k8s.io/apiserver/pkg/authentication/request/x509" + "k8s.io/apiserver/pkg/authentication/token/cache" webhooktoken "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook" authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" "k8s.io/client-go/util/cert" @@ -50,6 +51,8 @@ type DelegatingAuthenticatorConfig struct { // ClientCAFile is the CA bundle file used to authenticate client certificates ClientCAFile string + APIAudiences authenticator.Audiences + RequestHeaderConfig *RequestHeaderConfig } @@ -85,11 +88,12 @@ func (c DelegatingAuthenticatorConfig) New() (authenticator.Request, *spec.Secur } if c.TokenAccessReviewClient != nil { - tokenAuth, err := webhooktoken.NewFromInterface(c.TokenAccessReviewClient, c.CacheTTL) + tokenAuth, err := webhooktoken.NewFromInterface(c.TokenAccessReviewClient, c.APIAudiences) if err != nil { return nil, nil, err } - authenticators = append(authenticators, bearertoken.New(tokenAuth), websocket.NewProtocolAuthenticator(tokenAuth)) + cachingTokenAuth := cache.New(tokenAuth, false, c.CacheTTL, c.CacheTTL) + authenticators = append(authenticators, bearertoken.New(cachingTokenAuth), websocket.NewProtocolAuthenticator(cachingTokenAuth)) securityDefinitions["BearerToken"] = &spec.SecurityScheme{ SecuritySchemeProps: spec.SecuritySchemeProps{ diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go b/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go index 9f0453b15f232..5ac6b2ddf9d75 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go @@ -36,25 +36,26 @@ func 
NewAuthenticatedGroupAdder(auth authenticator.Request) authenticator.Reques return &AuthenticatedGroupAdder{auth} } -func (g *AuthenticatedGroupAdder) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { - u, ok, err := g.Authenticator.AuthenticateRequest(req) +func (g *AuthenticatedGroupAdder) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + r, ok, err := g.Authenticator.AuthenticateRequest(req) if err != nil || !ok { return nil, ok, err } - if u.GetName() == user.Anonymous { - return u, true, nil + if r.User.GetName() == user.Anonymous { + return r, true, nil } - for _, group := range u.GetGroups() { + for _, group := range r.User.GetGroups() { if group == user.AllAuthenticated || group == user.AllUnauthenticated { - return u, true, nil + return r, true, nil } } - return &user.DefaultInfo{ - Name: u.GetName(), - UID: u.GetUID(), - Groups: append(u.GetGroups(), user.AllAuthenticated), - Extra: u.GetExtra(), - }, true, nil + r.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: append(r.User.GetGroups(), user.AllAuthenticated), + Extra: r.User.GetExtra(), + } + return r, true, nil } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go b/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go index 1f71429b4ef92..3079dad625474 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go @@ -36,15 +36,16 @@ func NewGroupAdder(auth authenticator.Request, groups []string) authenticator.Re return &GroupAdder{auth, groups} } -func (g *GroupAdder) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { - u, ok, err := g.Authenticator.AuthenticateRequest(req) +func (g *GroupAdder) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + r, ok, err := g.Authenticator.AuthenticateRequest(req) if err != nil || !ok { return nil, ok, err } - return &user.DefaultInfo{ - Name: u.GetName(), - UID: u.GetUID(), - Groups: append(u.GetGroups(), g.Groups...), - Extra: u.GetExtra(), - }, true, nil + r.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: append(r.User.GetGroups(), g.Groups...), + Extra: r.User.GetExtra(), + } + return r, true, nil } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go b/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go index 4f60d522f7602..0ed5ee55965a9 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go @@ -17,6 +17,8 @@ limitations under the License. 
package group import ( + "context" + "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" ) @@ -34,15 +36,16 @@ func NewTokenGroupAdder(auth authenticator.Token, groups []string) authenticator return &TokenGroupAdder{auth, groups} } -func (g *TokenGroupAdder) AuthenticateToken(token string) (user.Info, bool, error) { - u, ok, err := g.Authenticator.AuthenticateToken(token) +func (g *TokenGroupAdder) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + r, ok, err := g.Authenticator.AuthenticateToken(ctx, token) if err != nil || !ok { return nil, ok, err } - return &user.DefaultInfo{ - Name: u.GetName(), - UID: u.GetUID(), - Groups: append(u.GetGroups(), g.Groups...), - Extra: u.GetExtra(), - }, true, nil + r.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: append(r.User.GetGroups(), g.Groups...), + Extra: r.User.GetExtra(), + } + return r, true, nil } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go index a6d22942a3258..f9177d151379a 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go @@ -30,7 +30,14 @@ const ( ) func NewAuthenticator() authenticator.Request { - return authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) { - return &user.DefaultInfo{Name: anonymousUser, Groups: []string{unauthenticatedGroup}}, true, nil + return authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) { + auds, _ := authenticator.AudiencesFrom(req.Context()) + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: anonymousUser, + Groups: []string{unauthenticatedGroup}, + }, + Audiences: auds, + }, true, nil }) } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD.bazel index 96c280a6d9fa6..05c0121ff33dc 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD.bazel @@ -6,8 +6,5 @@ go_library( importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken", importpath = "k8s.io/apiserver/pkg/authentication/request/bearertoken", visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], + deps = ["//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go index 5ca22f38ba894..2de796b723286 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go @@ -22,7 +22,6 @@ import ( "strings" "k8s.io/apiserver/pkg/authentication/authenticator" - "k8s.io/apiserver/pkg/authentication/user" ) type Authenticator struct { @@ -35,7 +34,7 @@ func New(auth authenticator.Token) *Authenticator { var invalidToken = errors.New("invalid bearer token") -func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, 
error) { +func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { auth := strings.TrimSpace(req.Header.Get("Authorization")) if auth == "" { return nil, false, nil @@ -52,7 +51,7 @@ func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, return nil, false, nil } - user, ok, err := a.auth.AuthenticateToken(token) + resp, ok, err := a.auth.AuthenticateToken(req.Context(), token) // if we authenticated successfully, go ahead and remove the bearer token so that no one // is ever tempted to use it inside of the API server if ok { @@ -64,5 +63,5 @@ func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, err = invalidToken } - return user, ok, err + return resp, ok, err } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go index 948478b80ed39..70af861d8b5dd 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go @@ -105,7 +105,7 @@ func NewSecure(clientCA string, proxyClientNames []string, nameHeaders []string, return x509request.NewVerifier(opts, headerAuthenticator, sets.NewString(proxyClientNames...)), nil } -func (a *requestHeaderAuthRequestHandler) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (a *requestHeaderAuthRequestHandler) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { name := headerValue(req.Header, a.nameHeaders) if len(name) == 0 { return nil, false, nil @@ -126,10 +126,12 @@ func (a *requestHeaderAuthRequestHandler) AuthenticateRequest(req *http.Request) } } - return &user.DefaultInfo{ - Name: name, - Groups: groups, - Extra: extra, + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: name, + Groups: groups, + Extra: extra, + }, }, true, nil } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD.bazel index fdb14b73217a3..1cc8e943086e1 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD.bazel @@ -9,6 +9,5 @@ go_library( deps = [ "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go b/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go index 1613940981064..512063beea13a 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go @@ -21,7 +21,6 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/authentication/authenticator" - "k8s.io/apiserver/pkg/authentication/user" ) // unionAuthRequestHandler authenticates requests using a chain of authenticator.Requests @@ -51,20 +50,20 @@ func NewFailOnError(authRequestHandlers ...authenticator.Request) authenticator. } // AuthenticateRequest authenticates the request using a chain of authenticator.Request objects. 
-func (authHandler *unionAuthRequestHandler) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (authHandler *unionAuthRequestHandler) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { var errlist []error for _, currAuthRequestHandler := range authHandler.Handlers { - info, ok, err := currAuthRequestHandler.AuthenticateRequest(req) + resp, ok, err := currAuthRequestHandler.AuthenticateRequest(req) if err != nil { if authHandler.FailOnError { - return info, ok, err + return resp, ok, err } errlist = append(errlist, err) continue } if ok { - return info, ok, err + return resp, ok, err } } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD.bazel index cf4405b137dab..c3a547d763c5f 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD.bazel @@ -8,7 +8,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/wsstream:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go index 4a30bb6359cce..11afa84cbd0e1 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go @@ -25,7 +25,6 @@ import ( "unicode/utf8" "k8s.io/apiserver/pkg/authentication/authenticator" - "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/util/wsstream" ) @@ -46,7 +45,7 @@ func NewProtocolAuthenticator(auth authenticator.Token) *ProtocolAuthenticator { return &ProtocolAuthenticator{auth} } -func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { // Only accept websocket connections if !wsstream.IsWebSocketRequest(req) { return nil, false, nil @@ -91,7 +90,7 @@ func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (user.Inf return nil, false, nil } - user, ok, err := a.auth.AuthenticateToken(token) + resp, ok, err := a.auth.AuthenticateToken(req.Context(), token) // on success, remove the protocol with the token if ok { @@ -105,5 +104,5 @@ func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (user.Inf err = errInvalidToken } - return user, ok, err + return resp, ok, err } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD.bazel index 2cca090a89d90..36a9ec4dcbd14 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD.bazel @@ -10,7 +10,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/authentication/request/x509", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git 
a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS new file mode 100644 index 0000000000000..470b7a1c92d15 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go index c98d7ff681fcb..bc875adacf1e3 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go @@ -23,7 +23,6 @@ import ( "net/http" "time" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -60,14 +59,14 @@ func init() { // UserConversion defines an interface for extracting user info from a client certificate chain type UserConversion interface { - User(chain []*x509.Certificate) (user.Info, bool, error) + User(chain []*x509.Certificate) (*authenticator.Response, bool, error) } // UserConversionFunc is a function that implements the UserConversion interface. -type UserConversionFunc func(chain []*x509.Certificate) (user.Info, bool, error) +type UserConversionFunc func(chain []*x509.Certificate) (*authenticator.Response, bool, error) // User implements x509.UserConversion -func (f UserConversionFunc) User(chain []*x509.Certificate) (user.Info, bool, error) { +func (f UserConversionFunc) User(chain []*x509.Certificate) (*authenticator.Response, bool, error) { return f(chain) } @@ -84,7 +83,7 @@ func New(opts x509.VerifyOptions, user UserConversion) *Authenticator { } // AuthenticateRequest authenticates the request using presented client certificates -func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { return nil, false, nil } @@ -136,7 +135,7 @@ func NewVerifier(opts x509.VerifyOptions, auth authenticator.Request, allowedCom } // AuthenticateRequest verifies the presented client certificate, then delegates to the wrapped auth -func (a *Verifier) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (a *Verifier) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { return nil, false, nil } @@ -168,8 +167,7 @@ func (a *Verifier) verifySubject(subject pkix.Name) error { if a.allowedCommonNames.Has(subject.CommonName) { return nil } - glog.Warningf("x509: subject with cn=%s is not in the allowed list: %v", subject.CommonName, a.allowedCommonNames.List()) - return fmt.Errorf("x509: subject with cn=%s is not allowed", subject.CommonName) + return fmt.Errorf("x509: subject with cn=%s is not in the allowed list", subject.CommonName) } // DefaultVerifyOptions returns VerifyOptions that use the system root certificates, current time, @@ -181,12 +179,14 @@ func DefaultVerifyOptions() x509.VerifyOptions { } // CommonNameUserConversion builds user info from a certificate chain using the subject's CommonName -var CommonNameUserConversion = UserConversionFunc(func(chain []*x509.Certificate) (user.Info, bool, error) { +var CommonNameUserConversion = UserConversionFunc(func(chain 
[]*x509.Certificate) (*authenticator.Response, bool, error) { if len(chain[0].Subject.CommonName) == 0 { return nil, false, nil } - return &user.DefaultInfo{ - Name: chain[0].Subject.CommonName, - Groups: chain[0].Subject.Organization, + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: chain[0].Subject.CommonName, + Groups: chain[0].Subject.Organization, + }, }, true, nil }) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD.bazel new file mode 100644 index 0000000000000..07e52be82f6ee --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "cache_simple.go", + "cache_striped.go", + "cached_token_authenticator.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/authentication/token/cache", + importpath = "k8s.io/apiserver/pkg/authentication/token/cache", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", + ], +) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go new file mode 100644 index 0000000000000..18d5692d7a72c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "time" + + lrucache "k8s.io/apimachinery/pkg/util/cache" + "k8s.io/apimachinery/pkg/util/clock" +) + +type simpleCache struct { + lru *lrucache.LRUExpireCache +} + +func newSimpleCache(size int, clock clock.Clock) cache { + return &simpleCache{lru: lrucache.NewLRUExpireCacheWithClock(size, clock)} +} + +func (c *simpleCache) get(key string) (*cacheRecord, bool) { + record, ok := c.lru.Get(key) + if !ok { + return nil, false + } + value, ok := record.(*cacheRecord) + return value, ok +} + +func (c *simpleCache) set(key string, value *cacheRecord, ttl time.Duration) { + c.lru.Add(key, value, ttl) +} + +func (c *simpleCache) remove(key string) { + c.lru.Remove(key) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go new file mode 100644 index 0000000000000..e5b7afe4e7d31 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "hash/fnv" + "time" +) + +// split cache lookups across N striped caches +type stripedCache struct { + stripeCount uint32 + hashFunc func(string) uint32 + caches []cache +} + +type hashFunc func(string) uint32 +type newCacheFunc func() cache + +func newStripedCache(stripeCount int, hash hashFunc, newCacheFunc newCacheFunc) cache { + caches := []cache{} + for i := 0; i < stripeCount; i++ { + caches = append(caches, newCacheFunc()) + } + return &stripedCache{ + stripeCount: uint32(stripeCount), + hashFunc: hash, + caches: caches, + } +} + +func (c *stripedCache) get(key string) (*cacheRecord, bool) { + return c.caches[c.hashFunc(key)%c.stripeCount].get(key) +} +func (c *stripedCache) set(key string, value *cacheRecord, ttl time.Duration) { + c.caches[c.hashFunc(key)%c.stripeCount].set(key, value, ttl) +} +func (c *stripedCache) remove(key string) { + c.caches[c.hashFunc(key)%c.stripeCount].remove(key) +} + +func fnvHashFunc(key string) uint32 { + f := fnv.New32() + f.Write([]byte(key)) + return f.Sum32() +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go new file mode 100644 index 0000000000000..ea3853a38b417 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "time" + + utilclock "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/authentication/authenticator" +) + +// cacheRecord holds the three return values of the authenticator.Token AuthenticateToken method +type cacheRecord struct { + resp *authenticator.Response + ok bool + err error +} + +type cachedTokenAuthenticator struct { + authenticator authenticator.Token + + cacheErrs bool + successTTL time.Duration + failureTTL time.Duration + + cache cache +} + +type cache interface { + // given a key, return the record, and whether or not it existed + get(key string) (value *cacheRecord, exists bool) + // caches the record for the key + set(key string, value *cacheRecord, ttl time.Duration) + // removes the record for the key + remove(key string) +} + +// New returns a token authenticator that caches the results of the specified authenticator. A ttl of 0 bypasses the cache. 
+func New(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration) authenticator.Token { + return newWithClock(authenticator, cacheErrs, successTTL, failureTTL, utilclock.RealClock{}) +} + +func newWithClock(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration, clock utilclock.Clock) authenticator.Token { + return &cachedTokenAuthenticator{ + authenticator: authenticator, + cacheErrs: cacheErrs, + successTTL: successTTL, + failureTTL: failureTTL, + cache: newStripedCache(32, fnvHashFunc, func() cache { return newSimpleCache(128, clock) }), + } +} + +// AuthenticateToken implements authenticator.Token +func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + auds, _ := authenticator.AudiencesFrom(ctx) + + key := keyFunc(auds, token) + if record, ok := a.cache.get(key); ok { + return record.resp, record.ok, record.err + } + + resp, ok, err := a.authenticator.AuthenticateToken(ctx, token) + if !a.cacheErrs && err != nil { + return resp, ok, err + } + + switch { + case ok && a.successTTL > 0: + a.cache.set(key, &cacheRecord{resp: resp, ok: ok, err: err}, a.successTTL) + case !ok && a.failureTTL > 0: + a.cache.set(key, &cacheRecord{resp: resp, ok: ok, err: err}, a.failureTTL) + } + + return resp, ok, err +} + +func keyFunc(auds []string, token string) string { + return fmt.Sprintf("%#v|%v", auds, token) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD.bazel index 7203ab0248cdc..110b441fd18a2 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD.bazel @@ -7,7 +7,8 @@ go_library( importpath = "k8s.io/apiserver/pkg/authentication/token/tokenfile", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go index 57bb6c596d8e3..69568f17dd25a 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go @@ -17,14 +17,16 @@ limitations under the License. 
package tokenfile import ( + "context" "encoding/csv" "fmt" "io" "os" "strings" - "github.com/golang/glog" + "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/klog" ) type TokenAuthenticator struct { @@ -65,7 +67,7 @@ func NewCSV(path string) (*TokenAuthenticator, error) { recordNum++ if record[0] == "" { - glog.Warningf("empty token has been found in token file '%s', record number '%d'", path, recordNum) + klog.Warningf("empty token has been found in token file '%s', record number '%d'", path, recordNum) continue } @@ -74,7 +76,7 @@ func NewCSV(path string) (*TokenAuthenticator, error) { UID: record[2], } if _, exist := tokens[record[0]]; exist { - glog.Warningf("duplicate token has been found in token file '%s', record number '%d'", path, recordNum) + klog.Warningf("duplicate token has been found in token file '%s', record number '%d'", path, recordNum) } tokens[record[0]] = obj @@ -88,10 +90,10 @@ func NewCSV(path string) (*TokenAuthenticator, error) { }, nil } -func (a *TokenAuthenticator) AuthenticateToken(value string) (user.Info, bool, error) { +func (a *TokenAuthenticator) AuthenticateToken(ctx context.Context, value string) (*authenticator.Response, bool, error) { user, ok := a.tokens[value] if !ok { return nil, false, nil } - return user, true, nil + return &authenticator.Response{User: user}, true, nil } diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS index 63cf9723bb14b..cb2ae800f36a1 100755 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS @@ -1,4 +1,3 @@ reviewers: - deads2k - dims -- ericchiang diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/BUILD.bazel index 084e195737cd0..57ad737111728 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/endpoints/BUILD.bazel @@ -27,10 +27,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/openapi:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/builder:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/util:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD.bazel index 405cc1617a5c6..9932bdeca75c2 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD.bazel @@ -15,7 +15,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/endpoints/filters", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -34,5 +33,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/server/httplog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go index 4946341078961..458c8a67c9278 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go @@ -56,7 +56,11 @@ func WithAudit(handler http.Handler, sink audit.Sink, policy policy.Checker, lon } ev.Stage = auditinternal.StageRequestReceived - processAuditEvent(sink, ev, omitStages) + if processed := processAuditEvent(sink, ev, omitStages); !processed { + audit.ApiserverAuditDroppedCounter.Inc() + responsewriters.InternalError(w, req, errors.New("failed to store audit event")) + return + } // intercept the status code var longRunningSink audit.Sink @@ -137,10 +141,10 @@ func createAuditEventAndAttachToContext(req *http.Request, policy policy.Checker return req, ev, omitStages, nil } -func processAuditEvent(sink audit.Sink, ev *auditinternal.Event, omitStages []auditinternal.Stage) { +func processAuditEvent(sink audit.Sink, ev *auditinternal.Event, omitStages []auditinternal.Stage) bool { for _, stage := range omitStages { if ev.Stage == stage { - return + return true } } @@ -150,7 +154,7 @@ func processAuditEvent(sink audit.Sink, ev *auditinternal.Event, omitStages []au ev.StageTimestamp = metav1.NewMicroTime(time.Now()) } audit.ObserveEvent() - sink.ProcessEvents(ev) + return sink.ProcessEvents(ev) } func decorateResponseWriter(responseWriter http.ResponseWriter, ev *auditinternal.Event, sink audit.Sink, omitStages []auditinternal.Stage) http.ResponseWriter { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go index ba53fc609e03e..d9f70efac264a 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go @@ -21,8 +21,8 @@ import ( "net/http" "strings" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -50,27 +50,33 @@ func init() { // stores any such user found onto the provided context for the request. If authentication fails or returns an error // the failed handler is used. On success, "Authorization" header is removed from the request and handler // is invoked to serve the request. 
-func WithAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler) http.Handler { +func WithAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences) http.Handler { if auth == nil { - glog.Warningf("Authentication is disabled") + klog.Warningf("Authentication is disabled") return handler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - user, ok, err := auth.AuthenticateRequest(req) + if len(apiAuds) > 0 { + req = req.WithContext(authenticator.WithAudiences(req.Context(), apiAuds)) + } + resp, ok, err := auth.AuthenticateRequest(req) if err != nil || !ok { if err != nil { - glog.Errorf("Unable to authenticate the request due to an error: %v", err) + klog.Errorf("Unable to authenticate the request due to an error: %v", err) } failed.ServeHTTP(w, req) return } + // TODO(mikedanese): verify the response audience matches one of apiAuds if + // non-empty + // authorization header is not required anymore in case of a successful authentication. req.Header.Del("Authorization") - req = req.WithContext(genericapirequest.WithUser(req.Context(), user)) + req = req.WithContext(genericapirequest.WithUser(req.Context(), resp.User)) - authenticatedUserCounter.WithLabelValues(compressUsername(user.GetName())).Inc() + authenticatedUserCounter.WithLabelValues(compressUsername(resp.User.GetName())).Inc() handler.ServeHTTP(w, req) }) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go index 998c05bcf7326..c6ab15b3d672a 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go @@ -21,7 +21,7 @@ import ( "errors" "net/http" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/audit" @@ -44,7 +44,7 @@ const ( // WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise. func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler { if a == nil { - glog.Warningf("Authorization is disabled") + klog.Warningf("Authorization is disabled") return handler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { @@ -70,10 +70,10 @@ func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime. return } - glog.V(4).Infof("Forbidden: %#v, Reason: %q", req.RequestURI, reason) + klog.V(4).Infof("Forbidden: %#v, Reason: %q", req.RequestURI, reason) audit.LogAnnotation(ae, decisionAnnotationKey, decisionForbid) audit.LogAnnotation(ae, reasonAnnotationKey, reason) - responsewriters.Forbidden(ctx, attributes, w, req, "", s) + responsewriters.Forbidden(ctx, attributes, w, req, reason, s) }) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index 38414a6afa75e..d017f2bf68752 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -23,7 +23,7 @@ import ( "net/url" "strings" - "github.com/golang/glog" + "k8s.io/klog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" @@ -42,7 +42,7 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. 
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { impersonationRequests, err := buildImpersonationRequests(req.Header) if err != nil { - glog.V(4).Infof("%v", err) + klog.V(4).Infof("%v", err) responsewriters.InternalError(w, req, err) return } @@ -102,15 +102,15 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. userExtra[extraKey] = append(userExtra[extraKey], extraValue) default: - glog.V(4).Infof("unknown impersonation request type: %v", impersonationRequest) + klog.V(4).Infof("unknown impersonation request type: %v", impersonationRequest) responsewriters.Forbidden(ctx, actingAsAttributes, w, req, fmt.Sprintf("unknown impersonation request type: %v", impersonationRequest), s) return } decision, reason, err := a.Authorize(actingAsAttributes) if err != nil || decision != authorizer.DecisionAllow { - glog.V(4).Infof("Forbidden: %#v, Reason: %s, Error: %v", req.RequestURI, reason, err) - responsewriters.Forbidden(ctx, actingAsAttributes, w, req, "", s) + klog.V(4).Infof("Forbidden: %#v, Reason: %s, Error: %v", req.RequestURI, reason, err) + responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s) return } } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go b/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go index 695c62b5984a0..79cfefe4669bf 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go @@ -31,7 +31,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/discovery" "k8s.io/apiserver/pkg/registry/rest" - openapicommon "k8s.io/kube-openapi/pkg/common" + openapiproto "k8s.io/kube-openapi/pkg/util/proto" ) // APIGroupVersion is a helper for exposing rest.Storage objects as http.Handlers via go-restful @@ -85,8 +85,12 @@ type APIGroupVersion struct { // if the client requests it via Accept-Encoding EnableAPIResponseCompression bool - // OpenAPIConfig lets the individual handlers build a subset of the OpenAPI schema before they are installed. - OpenAPIConfig *openapicommon.Config + // OpenAPIModels exposes the OpenAPI models to each individual handler. + OpenAPIModels openapiproto.Models + + // The limit on the request body size that would be accepted and decoded in a write request. + // 0 means no limit. + MaxRequestBodyBytes int64 } // InstallREST registers the REST handlers (storage, watch, proxy and redirect) into a restful Container. 
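Aside (not part of the vendored diff): a minimal sketch of how the audience-aware token API vendored above composes. It wraps an audience-agnostic token check with WrapAudienceAgnosticToken so it only accepts the API's implicit audiences, caches results with the new token cache, and carries the request's target audiences through the context, mirroring how DelegatingAuthenticatorConfig and WithAuthentication wire these pieces. The static token "secret", the user name "demo-user", the audience string, and the 10s TTLs are illustrative assumptions only.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/token/cache"
	"k8s.io/apiserver/pkg/authentication/user"
)

func main() {
	// A toy audience-agnostic authenticator: accepts one hard-coded token
	// (illustrative assumption) and returns no Audiences of its own.
	raw := authenticator.TokenFunc(func(ctx context.Context, tok string) (*authenticator.Response, bool, error) {
		if tok != "secret" {
			return nil, false, nil
		}
		return &authenticator.Response{User: &user.DefaultInfo{Name: "demo-user"}}, true, nil
	})

	apiAuds := authenticator.Audiences{"https://kubernetes.default.svc"}

	// Restrict the wrapped authenticator to the implicit API audiences and
	// cache its results, as the delegating authenticator config does above.
	tokenAuth := cache.New(authenticator.WrapAudienceAgnosticToken(apiAuds, raw), false, 10*time.Second, 10*time.Second)

	// The request's expected audiences travel via the context; the wrapper
	// intersects them with the implicit audiences and fills resp.Audiences.
	ctx := authenticator.WithAudiences(context.Background(), apiAuds)
	resp, ok, err := tokenAuth.AuthenticateToken(ctx, "secret")
	fmt.Println(ok, err, resp.Audiences, resp.User.GetName())
}
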
diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD.bazel index 978e5d6ac8e79..6071803a2a04e 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD.bazel @@ -19,7 +19,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/websocket:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", @@ -52,6 +51,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/wsstream:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go index e40e4288ac574..6293a72867a4b 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -77,9 +77,9 @@ func createHandler(r rest.NamedCreater, scope RequestScope, admit admission.Inte scope.err(err, w, req) return } - decoder := scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: gv.Group, Version: runtime.APIVersionInternal}) + decoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion) - body, err := readBody(req) + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) if err != nil { scope.err(err, w, req) return @@ -167,6 +167,7 @@ func createHandler(r rest.NamedCreater, scope RequestScope, admit admission.Inte status.Code = int32(code) } + scope.Trace = trace transformResponseObject(ctx, scope, req, w, code, result) } } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go index ff35fa9dddd58..2961eb75c85f0 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -66,7 +66,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestSco options := &metav1.DeleteOptions{} if allowsOptions { - body, err := readBody(req) + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) if err != nil { scope.err(err, w, req) return @@ -175,6 +175,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestSco } } + scope.Trace = trace transformResponseObject(ctx, scope, req, w, status, result) } } @@ -182,6 +183,9 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestSco // DeleteCollection returns a function that will handle a collection deletion func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { + trace := utiltrace.New("Delete " + req.URL.Path) + defer trace.LogIfLong(500 * time.Millisecond) + if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { scope.err(errors.NewBadRequest("the dryRun alpha feature is disabled"), w, req) return @@ -223,7 +227,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope 
RequestSco options := &metav1.DeleteOptions{} if checkBody { - body, err := readBody(req) + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) if err != nil { scope.err(err, w, req) return @@ -310,6 +314,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestSco } } + scope.Trace = trace transformResponseObject(ctx, scope, req, w, http.StatusOK, result) } } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go index b234bcca4a70f..0f1c59946a387 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -74,7 +74,9 @@ func getResourceHandler(scope RequestScope, getter getterFunc) http.HandlerFunc } trace.Step("About to write a response") + scope.Trace = trace transformResponseObject(ctx, scope, req, w, http.StatusOK, result) + trace.Step("Transformed response object") } } @@ -242,7 +244,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch if timeout == 0 && minRequestTimeout > 0 { timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0)) } - glog.V(3).Infof("Starting watch for %s, rv=%s labels=%s fields=%s timeout=%s", req.URL.Path, opts.ResourceVersion, opts.LabelSelector, opts.FieldSelector, timeout) + klog.V(3).Infof("Starting watch for %s, rv=%s labels=%s fields=%s timeout=%s", req.URL.Path, opts.ResourceVersion, opts.LabelSelector, opts.FieldSelector, timeout) watcher, err := rw.Watch(ctx, &opts) if err != nil { @@ -279,6 +281,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch } } + scope.Trace = trace transformResponseObject(ctx, scope, req, w, http.StatusOK, result) trace.Step(fmt.Sprintf("Writing http response done (%d items)", numberOfItems)) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index b6b9958df9fda..e06dcdcf5c74f 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -47,6 +47,11 @@ import ( utiltrace "k8s.io/apiserver/pkg/util/trace" ) +const ( + // maximum number of operations a single json patch may contain. + maxJSONPatchOperations = 10000 +) + // PatchResource returns a function that will handle a resource patch. 
func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface, patchTypes []string) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { @@ -88,7 +93,7 @@ func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface ctx := req.Context() ctx = request.WithNamespace(ctx, namespace) - patchJS, err := readBody(req) + patchJS, err := limitedReadBody(req, scope.MaxRequestBodyBytes) if err != nil { scope.err(err, w, req) return @@ -118,9 +123,10 @@ func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface return } gv := scope.Kind.GroupVersion() + codec := runtime.NewCodec( scope.Serializer.EncoderForVersion(s.Serializer, gv), - scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: gv.Group, Version: runtime.APIVersionInternal}), + scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion), ) userInfo, _ := request.UserFrom(ctx) @@ -163,6 +169,8 @@ func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface kind: scope.Kind, resource: scope.Resource, + hubGroupVersion: scope.HubGroupVersion, + createValidation: rest.AdmissionToValidateObjectFunc(admit, staticAdmissionAttributes), updateValidation: rest.AdmissionToValidateObjectUpdateFunc(admit, staticAdmissionAttributes), admissionCheck: admissionCheck, @@ -198,6 +206,7 @@ func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface } trace.Step("Self-link added") + scope.Trace = trace transformResponseObject(ctx, scope, req, w, http.StatusOK, result) } } @@ -218,6 +227,8 @@ type patcher struct { resource schema.GroupVersionResource kind schema.GroupVersionKind + hubGroupVersion schema.GroupVersion + // Validation functions createValidation rest.ValidateObjectFunc updateValidation rest.ValidateObjectUpdateFunc @@ -242,11 +253,6 @@ type patcher struct { mechanism patchMechanism } -func (p *patcher) toUnversioned(versionedObj runtime.Object) (runtime.Object, error) { - gvk := p.kind.GroupKind().WithVersion(runtime.APIVersionInternal) - return p.unsafeConvertor.ConvertToVersion(versionedObj, gvk.GroupVersion()) -} - type patchMechanism interface { applyPatchToCurrentObject(currentObject runtime.Object) (runtime.Object, error) } @@ -265,7 +271,7 @@ func (p *jsonPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (r // Apply the patch. patchedObjJS, err := p.applyJSPatch(currentObjJS) if err != nil { - return nil, interpretPatchError(err) + return nil, err } // Construct the resulting typed, unversioned object. 
@@ -284,9 +290,18 @@ func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr case types.JSONPatchType: patchObj, err := jsonpatch.DecodePatch(p.patchJS) if err != nil { - return nil, err + return nil, errors.NewBadRequest(err.Error()) + } + if len(patchObj) > maxJSONPatchOperations { + return nil, errors.NewRequestEntityTooLargeError( + fmt.Sprintf("The allowed maximum operations in a JSON patch is %d, got %d", + maxJSONPatchOperations, len(patchObj))) + } + patchedJS, err := patchObj.Apply(versionedJS) + if err != nil { + return nil, errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false) } - return patchObj.Apply(versionedJS) + return patchedJS, nil case types.MergePatchType: return jsonpatch.MergePatch(versionedJS, p.patchJS) default: @@ -316,13 +331,8 @@ func (p *smpPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (ru if err := strategicPatchObject(p.defaulter, currentVersionedObject, p.patchJS, versionedObjToUpdate, p.schemaReferenceObj); err != nil { return nil, err } - // Convert the object back to unversioned (aka internal version). - unversionedObjToUpdate, err := p.toUnversioned(versionedObjToUpdate) - if err != nil { - return nil, err - } - - return unversionedObjToUpdate, nil + // Convert the object back to the hub version + return p.unsafeConvertor.ConvertToVersion(versionedObjToUpdate, p.hubGroupVersion) } // strategicPatchObject applies a strategic merge patch of to @@ -415,7 +425,7 @@ func applyPatchToObject( ) error { patchedObjMap, err := strategicpatch.StrategicMergeMapPatch(originalMap, patchMap, schemaReferenceObj) if err != nil { - return interpretPatchError(err) + return interpretStrategicMergePatchError(err) } // Rather than serialize the patched map to JSON, then decode it to an object, we go directly from a map to an object @@ -428,8 +438,8 @@ func applyPatchToObject( return nil } -// interpretPatchError interprets the error type and returns an error with appropriate HTTP code. -func interpretPatchError(err error) error { +// interpretStrategicMergePatchError interprets the error type and returns an error with appropriate HTTP code. +func interpretStrategicMergePatchError(err error) error { switch err { case mergepatch.ErrBadJSONDoc, mergepatch.ErrBadPatchFormatForPrimitiveList, mergepatch.ErrBadPatchFormatForRetainKeys, mergepatch.ErrBadPatchFormatForSetElementOrderList, mergepatch.ErrUnsupportedStrategicMergePatchFormat: return errors.NewBadRequest(err.Error()) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go index 8cee470a51e42..e140c081746b6 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go @@ -35,9 +35,11 @@ import ( // Will write the complete response object. func transformResponseObject(ctx context.Context, scope RequestScope, req *http.Request, w http.ResponseWriter, statusCode int, result runtime.Object) { // TODO: fetch the media type much earlier in request processing and pass it into this method. + trace := scope.Trace mediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, &scope) if err != nil { status := responsewriters.ErrorToAPIStatus(err) + trace.Step("Writing raw JSON response") responsewriters.WriteRawJSON(int(status.Code), status, w) return } @@ -68,6 +70,7 @@ func transformResponseObject(ctx context.Context, scope RequestScope, req *http. 
return } encoder := metainternalversion.Codecs.EncoderForVersion(info.Serializer, metav1beta1.SchemeGroupVersion) + trace.Step(fmt.Sprintf("Serializing response as type %s", info.MediaType)) responsewriters.SerializeObject(info.MediaType, encoder, w, req, statusCode, partial) return @@ -79,6 +82,7 @@ func transformResponseObject(ctx context.Context, scope RequestScope, req *http. return } list := &metav1beta1.PartialObjectMetadataList{} + trace.Step("Processing list items") err := meta.EachListItem(result, func(obj runtime.Object) error { m, err := meta.Accessor(obj) if err != nil { @@ -101,6 +105,7 @@ func transformResponseObject(ctx context.Context, scope RequestScope, req *http. return } encoder := metainternalversion.Codecs.EncoderForVersion(info.Serializer, metav1beta1.SchemeGroupVersion) + trace.Step(fmt.Sprintf("Serializing response as type %s", info.MediaType)) responsewriters.SerializeObject(info.MediaType, encoder, w, req, statusCode, list) return @@ -109,17 +114,20 @@ func transformResponseObject(ctx context.Context, scope RequestScope, req *http. // TODO: skip if this is a status response (delete without body)? opts := &metav1beta1.TableOptions{} + trace.Step("Decoding parameters") if err := metav1beta1.ParameterCodec.DecodeParameters(req.URL.Query(), metav1beta1.SchemeGroupVersion, opts); err != nil { scope.err(err, w, req) return } + trace.Step("Converting to table") table, err := scope.TableConvertor.ConvertToTable(ctx, result, opts) if err != nil { scope.err(err, w, req) return } + trace.Step("Processing rows") for i := range table.Rows { item := &table.Rows[i] switch opts.IncludeObject { @@ -156,6 +164,7 @@ func transformResponseObject(ctx context.Context, scope RequestScope, req *http. return } encoder := metainternalversion.Codecs.EncoderForVersion(info.Serializer, metav1beta1.SchemeGroupVersion) + trace.Step(fmt.Sprintf("Serializing response as type %s", info.MediaType)) responsewriters.SerializeObject(info.MediaType, encoder, w, req, statusCode, table) return @@ -164,11 +173,13 @@ func transformResponseObject(ctx context.Context, scope RequestScope, req *http. 
accepted, _ := negotiation.MediaTypesForSerializer(metainternalversion.Codecs) err := negotiation.NewNotAcceptableError(accepted) status := responsewriters.ErrorToAPIStatus(err) + trace.Step("Writing raw JSON response") responsewriters.WriteRawJSON(int(status.Code), status, w) return } } + trace.Step("Writing response") responsewriters.WriteObject(statusCode, scope.Kind.GroupVersion(), scope.Serializer, result, w, req) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index 8b3ca9d62bc16..3a0456cabc3e8 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -20,12 +20,15 @@ import ( "context" "encoding/hex" "fmt" + "io" "io/ioutil" "net/http" "net/url" + goruntime "runtime" + "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -33,13 +36,13 @@ import ( metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" + utiltrace "k8s.io/apiserver/pkg/util/trace" openapiproto "k8s.io/kube-openapi/pkg/util/proto" ) @@ -56,15 +59,21 @@ type RequestScope struct { Typer runtime.ObjectTyper UnsafeConvertor runtime.ObjectConvertor Authorizer authorizer.Authorizer + Trace *utiltrace.Trace TableConvertor rest.TableConvertor - OpenAPISchema openapiproto.Schema + OpenAPIModels openapiproto.Models Resource schema.GroupVersionResource Kind schema.GroupVersionKind Subresource string MetaGroupVersion schema.GroupVersion + + // HubGroupVersion indicates what version objects read from etcd or incoming requests should be converted to for in-memory handling. 
+ HubGroupVersion schema.GroupVersion + + MaxRequestBodyBytes int64 } func (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) { @@ -177,10 +186,17 @@ func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, panicCh := make(chan interface{}, 1) go func() { // panics don't cross goroutine boundaries, so we have to handle ourselves - defer utilruntime.HandleCrash(func(panicReason interface{}) { - // Propagate to parent goroutine - panicCh <- panicReason - }) + defer func() { + panicReason := recover() + if panicReason != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:goruntime.Stack(buf, false)] + panicReason = strings.TrimSuffix(fmt.Sprintf("%v\n%s", panicReason, string(buf)), "\n") + // Propagate to parent goroutine + panicCh <- panicReason + } + }() if result, err := fn(); err != nil { errCh <- err @@ -279,7 +295,7 @@ func setListSelfLink(obj runtime.Object, ctx context.Context, req *http.Request, return 0, err } if err := namer.SetSelfLink(obj, uri); err != nil { - glog.V(4).Infof("Unable to set self link on object: %v", err) + klog.V(4).Infof("Unable to set self link on object: %v", err) } requestInfo, ok := request.RequestInfoFrom(ctx) if !ok { @@ -311,9 +327,23 @@ func summarizeData(data []byte, maxLength int) string { } } -func readBody(req *http.Request) ([]byte, error) { +func limitedReadBody(req *http.Request, limit int64) ([]byte, error) { defer req.Body.Close() - return ioutil.ReadAll(req.Body) + if limit <= 0 { + return ioutil.ReadAll(req.Body) + } + lr := &io.LimitedReader{ + R: req.Body, + N: limit + 1, + } + data, err := ioutil.ReadAll(lr) + if err != nil { + return nil, err + } + if lr.N <= 0 { + return nil, errors.NewRequestEntityTooLargeError(fmt.Sprintf("limit is %d", limit)) + } + return data, nil } func parseTimeout(str string) time.Duration { @@ -322,7 +352,7 @@ func parseTimeout(str string) time.Duration { if err == nil { return timeout } - glog.Errorf("Failed to parse %q: %v", str, err) + klog.Errorf("Failed to parse %q: %v", str, err) } return 30 * time.Second } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go index 19d23e1f2eba0..f6bbd061a6b1e 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -64,7 +64,7 @@ func UpdateResource(r rest.Updater, scope RequestScope, admit admission.Interfac ctx := req.Context() ctx = request.WithNamespace(ctx, namespace) - body, err := readBody(req) + body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) if err != nil { scope.err(err, w, req) return @@ -89,8 +89,9 @@ func UpdateResource(r rest.Updater, scope RequestScope, admit admission.Interfac } defaultGVK := scope.Kind original := r.New() + trace.Step("About to convert to expected version") - decoder := scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: defaultGVK.Group, Version: runtime.APIVersionInternal}) + decoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion) obj, gvk, err := decoder.Decode(body, &defaultGVK, original) if err != nil { err = transformDecodeError(scope.Typer, err, original, gvk, body) @@ -189,6 +190,7 @@ func UpdateResource(r rest.Updater, scope RequestScope, admit admission.Interfac status = http.StatusCreated } + scope.Trace = trace transformResponseObject(ctx, scope, req, w, status, result) } } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go 
b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go index e6444d803c4b9..31234c82f637e 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -39,10 +39,6 @@ import ( "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/registry/rest" genericfilters "k8s.io/apiserver/pkg/server/filters" - utilopenapi "k8s.io/apiserver/pkg/util/openapi" - openapibuilder "k8s.io/kube-openapi/pkg/builder" - openapiutil "k8s.io/kube-openapi/pkg/util" - openapiproto "k8s.io/kube-openapi/pkg/util/proto" ) const ( @@ -135,17 +131,17 @@ func (a *APIInstaller) newWebService() *restful.WebService { return ws } -// getResourceKind returns the external group version kind registered for the given storage +// GetResourceKind returns the external group version kind registered for the given storage // object. If the storage object is a subresource and has an override supplied for it, it returns // the group version kind supplied in the override. -func (a *APIInstaller) getResourceKind(path string, storage rest.Storage) (schema.GroupVersionKind, error) { +func GetResourceKind(groupVersion schema.GroupVersion, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) { // Let the storage tell us exactly what GVK it has if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok { - return gvkProvider.GroupVersionKind(a.group.GroupVersion), nil + return gvkProvider.GroupVersionKind(groupVersion), nil } object := storage.New() - fqKinds, _, err := a.group.Typer.ObjectKinds(object) + fqKinds, _, err := typer.ObjectKinds(object) if err != nil { return schema.GroupVersionKind{}, err } @@ -154,13 +150,13 @@ func (a *APIInstaller) getResourceKind(path string, storage rest.Storage) (schem // we're trying to register here fqKindToRegister := schema.GroupVersionKind{} for _, fqKind := range fqKinds { - if fqKind.Group == a.group.GroupVersion.Group { - fqKindToRegister = a.group.GroupVersion.WithKind(fqKind.Kind) + if fqKind.Group == groupVersion.Group { + fqKindToRegister = groupVersion.WithKind(fqKind.Kind) break } } if fqKindToRegister.Empty() { - return schema.GroupVersionKind{}, fmt.Errorf("unable to locate fully qualified kind for %v: found %v when registering for %v", reflect.TypeOf(object), fqKinds, a.group.GroupVersion) + return schema.GroupVersionKind{}, fmt.Errorf("unable to locate fully qualified kind for %v: found %v when registering for %v", reflect.TypeOf(object), fqKinds, groupVersion) } // group is guaranteed to match based on the check above @@ -180,7 +176,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag return nil, err } - fqKindToRegister, err := a.getResourceKind(path, storage) + fqKindToRegister, err := GetResourceKind(a.group.GroupVersion, storage, a.group.Typer) if err != nil { return nil, err } @@ -412,7 +408,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag namespaceParamName := "namespaces" // Handler for standard REST verbs (GET, PUT, POST and DELETE). 
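Note: the limitedReadBody helper added in the rest.go hunk above replaces readBody and bounds how many bytes of a request body the write handlers will read. A small self-contained sketch of the same io.LimitedReader pattern, with an illustrative wrapper name and limit value:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// limitedRead reads at most limit bytes from r and reports an error when the
// body is larger than the limit. Asking the LimitedReader for limit+1 bytes
// is what distinguishes "exactly at the limit" from "over the limit".
func limitedRead(r io.ReadCloser, limit int64) ([]byte, error) {
	defer r.Close()
	if limit <= 0 {
		return ioutil.ReadAll(r)
	}
	lr := &io.LimitedReader{R: r, N: limit + 1}
	data, err := ioutil.ReadAll(lr)
	if err != nil {
		return nil, err
	}
	if lr.N <= 0 {
		return nil, fmt.Errorf("request body too large: limit is %d bytes", limit)
	}
	return data, nil
}

func main() {
	body := ioutil.NopCloser(strings.NewReader(`{"kind":"Pod"}`))
	data, err := limitedRead(body, 8)
	fmt.Println(string(data), err) // errors: the body exceeds the 8-byte limit
}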
namespaceParam := ws.PathParameter("namespace", "object name and auth scope, such as for teams and projects").DataType("string") - namespacedPath := namespaceParamName + "/{" + "namespace" + "}/" + resource + namespacedPath := namespaceParamName + "/{namespace}/" + resource namespaceParams := []*restful.Parameter{namespaceParam} resourcePath := namespacedPath @@ -506,15 +502,16 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Subresource: subresource, Kind: fqKindToRegister, + HubGroupVersion: schema.GroupVersion{Group: fqKindToRegister.Group, Version: runtime.APIVersionInternal}, + MetaGroupVersion: metav1.SchemeGroupVersion, + + MaxRequestBodyBytes: a.group.MaxRequestBodyBytes, } if a.group.MetaGroupVersion != nil { reqScope.MetaGroupVersion = *a.group.MetaGroupVersion } - reqScope.OpenAPISchema, err = a.getOpenAPISchema(ws.RootPath(), resource, fqKindToRegister, defaultVersionedObject) - if err != nil { - return nil, fmt.Errorf("unable to get openapi schema for %v: %v", fqKindToRegister, err) - } + reqScope.OpenAPIModels = a.group.OpenAPIModels for _, action := range actions { producedObject := storageMeta.ProducesObject(action.Verb) if producedObject == nil { @@ -556,7 +553,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag return nil, fmt.Errorf("missing parent storage: %q", resource) } - fqParentKind, err := a.getResourceKind(resource, parentStorage) + fqParentKind, err := GetResourceKind(a.group.GroupVersion, parentStorage, a.group.Typer) if err != nil { return nil, err } @@ -736,6 +733,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Returns(http.StatusAccepted, "Accepted", versionedStatus) if isGracefulDeleter { route.Reads(versionedDeleterObject) + route.ParameterNamed("body").Required(false) if err := addObjectParams(ws, route, versionedDeleteOptions); err != nil { return nil, err } @@ -870,24 +868,6 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag return &apiResource, nil } -// getOpenAPISchema builds the openapi schema for a single resource model to be given to each handler. It will -// return nil if the apiserver doesn't have openapi enabled, or if the specific path should be ignored by openapi. 
-func (a *APIInstaller) getOpenAPISchema(rootPath, resource string, kind schema.GroupVersionKind, sampleObject interface{}) (openapiproto.Schema, error) { - path := gpath.Join(rootPath, resource) - if a.group.OpenAPIConfig == nil { - return nil, nil - } - pathsToIgnore := openapiutil.NewTrie(a.group.OpenAPIConfig.IgnorePrefixes) - if pathsToIgnore.HasPrefix(path) { - return nil, nil - } - openAPIDefinitions, err := openapibuilder.BuildOpenAPIDefinitionsForResource(sampleObject, a.group.OpenAPIConfig) - if err != nil { - return nil, err - } - return utilopenapi.ToProtoSchema(openAPIDefinitions, kind) -} - // indirectArbitraryPointer returns *ptrToObject for an arbitrary pointer func indirectArbitraryPointer(ptrToObject interface{}) interface{} { return reflect.Indirect(reflect.ValueOf(ptrToObject)).Interface() diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel index 390078165cf72..e4cd23bf538b1 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel @@ -11,12 +11,12 @@ go_library( importpath = "k8s.io/apiserver/pkg/endpoints/request", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/validation/path:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go index 95166f5c47418..fe3ae38edcd7d 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go @@ -36,6 +36,9 @@ const ( // auditKey is the context key for the audit event. auditKey + + // audiencesKey is the context key for request audiences. + audiencesKey ) // NewContext instantiates a base context object for request flows. diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go index 1520bb3c9e515..cc8ae39fa2c5c 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) // LongRunningRequestCheck is a predicate which is true for long-running http requests. @@ -210,7 +210,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er opts := metainternalversion.ListOptions{} if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { // An error in parsing request will result in default to "list" and not setting "name" field. - glog.Errorf("Couldn't parse request %#v: %v", req.URL.Query(), err) + klog.Errorf("Couldn't parse request %#v: %v", req.URL.Query(), err) // Reset opts to not rely on partial results from parsing. // However, if watch is set, let's report it. 
opts = metainternalversion.ListOptions{} diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index e06f8919fae2d..92418256814cf 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -52,6 +52,13 @@ const ( // audited. AdvancedAuditing utilfeature.Feature = "AdvancedAuditing" + // owner: @pbarker + // alpha: v1.13 + // + // DynamicAuditing enables configuration of audit policy and webhook backends through an + // AuditSink API object. + DynamicAuditing utilfeature.Feature = "DynamicAuditing" + // owner: @ilackams // alpha: v1.7 // @@ -75,6 +82,7 @@ const ( // owner: @apelisse // alpha: v1.12 + // beta: v1.13 // // Allow requests to be processed but not stored, so that // validation, merging, mutation can be tested without @@ -93,8 +101,9 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, ValidateProxyRedirects: {Default: false, PreRelease: utilfeature.Alpha}, AdvancedAuditing: {Default: true, PreRelease: utilfeature.GA}, + DynamicAuditing: {Default: false, PreRelease: utilfeature.Alpha}, APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, Initializers: {Default: false, PreRelease: utilfeature.Alpha}, APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, - DryRun: {Default: false, PreRelease: utilfeature.Alpha}, + DryRun: {Default: true, PreRelease: utilfeature.Beta}, } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD.bazel index ac859686988a3..ae3e90e1acaa7 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD.bazel @@ -12,7 +12,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/registry/generic", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -20,5 +19,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD.bazel index 18083828379f5..16179fd37ec3c 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD.bazel @@ -13,7 +13,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/registry/generic/registry", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/validation/path:go_default_library", @@ -40,5 +39,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/dryrun:go_default_library", + 
"//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go index fc93cc4d25b60..4552475070521 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go @@ -19,7 +19,7 @@ package registry import ( "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" @@ -43,10 +43,10 @@ func StorageWithCacher(capacity int) generic.StorageDecorator { s, d := generic.NewRawStorage(storageConfig) if capacity == 0 { - glog.V(5).Infof("Storage caching is disabled for %T", objectType) + klog.V(5).Infof("Storage caching is disabled for %T", objectType) return s, d } - glog.V(5).Infof("Storage caching is enabled for %T with capacity %v", objectType, capacity) + klog.V(5).Infof("Storage caching is enabled for %T with capacity %v", objectType, capacity) // TODO: we would change this later to make storage always have cacher and hide low level KV layer inside. // Currently it has two layers of same storage interface -- cacher and low level kv. diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go index 615637d8dee3b..2dcf99eae5275 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -47,7 +47,7 @@ import ( "k8s.io/apiserver/pkg/storage/etcd/metrics" "k8s.io/apiserver/pkg/util/dryrun" - "github.com/golang/glog" + "k8s.io/klog" ) // ObjectFunc is a function to act on a given object. An error may be returned @@ -501,7 +501,7 @@ func (e *Store) shouldDeleteForFailedInitialization(ctx context.Context, obj run // Used for objects that are either been finalized or have never initialized. 
func (e *Store) deleteWithoutFinalizers(ctx context.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions, dryRun bool) (runtime.Object, bool, error) { out := e.NewFunc() - glog.V(6).Infof("going to delete %s from registry, triggered by update", name) + klog.V(6).Infof("going to delete %s from registry, triggered by update", name) if err := e.Storage.Delete(ctx, key, out, preconditions, dryRun); err != nil { // Deletion is racy, i.e., there could be multiple update // requests to remove all finalizers from the object, so we @@ -909,7 +909,7 @@ func (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name if !graceful { // set the DeleteGracePeriods to 0 if the object has pendingFinalizers but not supporting graceful deletion if pendingFinalizers { - glog.V(6).Infof("update the DeletionTimestamp to \"now\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers", name) + klog.V(6).Infof("update the DeletionTimestamp to \"now\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers", name) err = markAsDeleting(existing) if err != nil { return nil, err @@ -1017,7 +1017,7 @@ func (e *Store) Delete(ctx context.Context, name string, options *metav1.DeleteO } // delete immediately, or no graceful deletion supported - glog.V(6).Infof("going to delete %s from registry: ", name) + klog.V(6).Infof("going to delete %s from registry: ", name) out = e.NewFunc() if err := e.Storage.Delete(ctx, key, out, &preconditions, dryrun.IsDryRun(options.DryRun)); err != nil { // Please refer to the place where we set ignoreNotFound for the reason @@ -1103,7 +1103,7 @@ func (e *Store) DeleteCollection(ctx context.Context, options *metav1.DeleteOpti return } if _, _, err := e.Delete(ctx, accessor.GetName(), options); err != nil && !kubeerr.IsNotFound(err) { - glog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err) + klog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err) errs <- err return } @@ -1246,7 +1246,7 @@ func (e *Store) Export(ctx context.Context, name string, opts metav1.ExportOptio if accessor, err := meta.Accessor(obj); err == nil { exportObjectMeta(accessor, opts.Exact) } else { - glog.V(4).Infof("Object of type %v does not have ObjectMeta: %v", reflect.TypeOf(obj), err) + klog.V(4).Infof("Object of type %v does not have ObjectMeta: %v", reflect.TypeOf(obj), err) } if e.ExportStrategy != nil { @@ -1411,12 +1411,12 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { func (e *Store) startObservingCount(period time.Duration) func() { prefix := e.KeyRootFunc(genericapirequest.NewContext()) resourceName := e.DefaultQualifiedResource.String() - glog.V(2).Infof("Monitoring %v count at /%v", resourceName, prefix) + klog.V(2).Infof("Monitoring %v count at /%v", resourceName, prefix) stopCh := make(chan struct{}) go wait.JitterUntil(func() { count, err := e.Storage.Count(prefix) if err != nil { - glog.V(5).Infof("Failed to update storage count metric: %v", err) + klog.V(5).Infof("Failed to update storage count metric: %v", err) metrics.UpdateObjectCount(resourceName, -1) } else { metrics.UpdateObjectCount(resourceName, count) diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go index 94a4794422c05..858ad922af4b6 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go +++ 
b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -17,11 +17,11 @@ limitations under the License. package generic import ( - "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/klog" ) // StorageDecorator is a function signature for producing a storage.Interface @@ -54,7 +54,7 @@ func UndecoratedStorage( func NewRawStorage(config *storagebackend.Config) (storage.Interface, factory.DestroyFunc) { s, d, err := factory.Create(*config) if err != nil { - glog.Fatalf("Unable to create storage backend: config (%v), err (%v)", config, err) + klog.Fatalf("Unable to create storage backend: config (%v), err (%v)", config, err) } return s, d } diff --git a/vendor/k8s.io/apiserver/pkg/server/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/BUILD.bazel index b079ef4a6975c..dc40887398e3b 100644 --- a/vendor/k8s.io/apiserver/pkg/server/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/server/BUILD.bazel @@ -24,8 +24,8 @@ go_library( "//vendor/github.com/coreos/go-systemd/daemon:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", + "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -71,9 +71,14 @@ go_library( "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/openapi:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/builder:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index e7b0a8ecca02a..0299f63f68ab3 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -26,12 +26,14 @@ import ( "sort" "strconv" "strings" + "sync/atomic" "time" "github.com/emicklei/go-restful-swagger12" + jsonpatch "github.com/evanphx/json-patch" "github.com/go-openapi/spec" - "github.com/golang/glog" "github.com/pborman/uuid" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -157,6 +159,13 @@ type Config struct { // If specified, long running requests such as watch will be allocated a random timeout between this value, and // twice this value. Note that it is up to the request handlers to ignore or honor this timeout. In seconds. MinRequestTimeout int + // The limit on the total size increase all "copy" operations in a json + // patch may cause. 
+ // This affects all places that applies json patch in the binary. + JSONPatchMaxCopyBytes int64 + // The limit on the request body size that would be accepted and decoded in a write request. + // 0 means no limit. + MaxRequestBodyBytes int64 // MaxRequestsInFlight is the maximum number of parallel non-long-running requests. Every further // request has to wait. Applies only to non-mutating requests. MaxRequestsInFlight int @@ -227,6 +236,9 @@ type SecureServingInfo struct { } type AuthenticationInfo struct { + // APIAudiences is a list of identifier that the API identifies as. This is + // used by some authenticators to validate audience bound credentials. + APIAudiences authenticator.Audiences // Authenticator determines which subject is making the request Authenticator authenticator.Request // SupportsBasicAuth indicates that's at least one Authenticator supports basic auth @@ -244,20 +256,36 @@ type AuthorizationInfo struct { // NewConfig returns a Config struct with the default values func NewConfig(codecs serializer.CodecFactory) *Config { return &Config{ - Serializer: codecs, - BuildHandlerChainFunc: DefaultBuildHandlerChain, - HandlerChainWaitGroup: new(utilwaitgroup.SafeWaitGroup), - LegacyAPIGroupPrefixes: sets.NewString(DefaultLegacyAPIPrefix), - DisabledPostStartHooks: sets.NewString(), - HealthzChecks: []healthz.HealthzChecker{healthz.PingHealthz, healthz.LogHealthz}, - EnableIndex: true, - EnableDiscovery: true, - EnableProfiling: true, - EnableMetrics: true, - MaxRequestsInFlight: 400, - MaxMutatingRequestsInFlight: 200, - RequestTimeout: time.Duration(60) * time.Second, - MinRequestTimeout: 1800, + Serializer: codecs, + BuildHandlerChainFunc: DefaultBuildHandlerChain, + HandlerChainWaitGroup: new(utilwaitgroup.SafeWaitGroup), + LegacyAPIGroupPrefixes: sets.NewString(DefaultLegacyAPIPrefix), + DisabledPostStartHooks: sets.NewString(), + HealthzChecks: []healthz.HealthzChecker{healthz.PingHealthz, healthz.LogHealthz}, + EnableIndex: true, + EnableDiscovery: true, + EnableProfiling: true, + EnableMetrics: true, + MaxRequestsInFlight: 400, + MaxMutatingRequestsInFlight: 200, + RequestTimeout: time.Duration(60) * time.Second, + MinRequestTimeout: 1800, + // 10MB is the recommended maximum client request size in bytes + // the etcd server should accept. See + // https://github.com/etcd-io/etcd/blob/release-3.3/etcdserver/server.go#L90. + // A request body might be encoded in json, and is converted to + // proto when persisted in etcd. Assuming the upper bound of + // the size ratio is 10:1, we set 100MB as the largest size + // increase the "copy" operations in a json patch may cause. + JSONPatchMaxCopyBytes: int64(100 * 1024 * 1024), + // 10MB is the recommended maximum client request size in bytes + // the etcd server should accept. See + // https://github.com/etcd-io/etcd/blob/release-3.3/etcdserver/server.go#L90. + // A request body might be encoded in json, and is converted to + // proto when persisted in etcd. Assuming the upper bound of + // the size ratio is 10:1, we set 100MB as the largest request + // body size to be accepted and decoded in a write request. 
+ MaxRequestBodyBytes: int64(100 * 1024 * 1024), EnableAPIResponseCompression: utilfeature.DefaultFeatureGate.Enabled(features.APIResponseCompression), // Default to treating watch as a long-running operation @@ -355,11 +383,11 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo // if there is no port, and we listen on one securely, use that one if _, _, err := net.SplitHostPort(c.ExternalAddress); err != nil { if c.SecureServing == nil { - glog.Fatalf("cannot derive external address port without listening on a secure port.") + klog.Fatalf("cannot derive external address port without listening on a secure port.") } _, port, err := c.SecureServing.HostPort() if err != nil { - glog.Fatalf("cannot derive external address from the secure port: %v", err) + klog.Fatalf("cannot derive external address from the secure port: %v", err) } c.ExternalAddress = net.JoinHostPort(c.ExternalAddress, strconv.Itoa(port)) } @@ -475,6 +503,20 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G DiscoveryGroupManager: discovery.NewRootAPIsHandler(c.DiscoveryAddresses, c.Serializer), enableAPIResponseCompression: c.EnableAPIResponseCompression, + maxRequestBodyBytes: c.MaxRequestBodyBytes, + } + + for { + if c.JSONPatchMaxCopyBytes <= 0 { + break + } + existing := atomic.LoadInt64(&jsonpatch.AccumulatedCopySizeLimit) + if existing > 0 && existing < c.JSONPatchMaxCopyBytes { + break + } + if atomic.CompareAndSwapInt64(&jsonpatch.AccumulatedCopySizeLimit, existing, c.JSONPatchMaxCopyBytes) { + break + } } for k, v := range delegationTarget.PostStartHooks() { @@ -534,7 +576,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = genericapifilters.WithAudit(handler, c.AuditBackend, c.AuditPolicyChecker, c.LongRunningFunc) failedHandler := genericapifilters.Unauthorized(c.Serializer, c.Authentication.SupportsBasicAuth) failedHandler = genericapifilters.WithFailedAuthenticationAudit(failedHandler, c.AuditBackend, c.AuditPolicyChecker) - handler = genericapifilters.WithAuthentication(handler, c.Authentication.Authenticator, failedHandler) + handler = genericapifilters.WithAuthentication(handler, c.Authentication.Authenticator, failedHandler, c.Authentication.APIAudiences) handler = genericfilters.WithCORS(handler, c.CorsAllowedOriginList, nil, nil, nil, "true") handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.LongRunningFunc, c.RequestTimeout) handler = genericfilters.WithWaitGroup(handler, c.LongRunningFunc, c.HandlerChainWaitGroup) diff --git a/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go b/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go index 2af16bf9b7ae7..a78250edae9bf 100644 --- a/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go @@ -21,8 +21,9 @@ import ( "net/http" "time" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/client-go/rest" ) @@ -45,9 +46,9 @@ func (s *DeprecatedInsecureServingInfo) Serve(handler http.Handler, shutdownTime } if len(s.Name) > 0 { - glog.Infof("Serving %s insecurely on %s", s.Name, s.Listener.Addr()) + klog.Infof("Serving %s insecurely on %s", s.Name, s.Listener.Addr()) } else { - glog.Infof("Serving insecurely on %s", s.Listener.Addr()) + klog.Infof("Serving insecurely on %s", s.Listener.Addr()) } return RunServer(insecureServer, 
s.Listener, shutdownTimeout, stopCh) } @@ -77,9 +78,13 @@ func (s *DeprecatedInsecureServingInfo) NewLoopbackClientConfig() (*rest.Config, // but allows apiserver code to stop special-casing a nil user to skip authorization checks. type InsecureSuperuser struct{} -func (InsecureSuperuser) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { - return &user.DefaultInfo{ - Name: "system:unsecured", - Groups: []string{user.SystemPrivilegedGroup, user.AllAuthenticated}, +func (InsecureSuperuser) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + auds, _ := authenticator.AudiencesFrom(req.Context()) + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: "system:unsecured", + Groups: []string{user.SystemPrivilegedGroup, user.AllAuthenticated}, + }, + Audiences: auds, }, true, nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/filters/BUILD.bazel index 3387d3001264a..2240271c7be11 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/server/filters/BUILD.bazel @@ -17,7 +17,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -28,5 +27,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/httplog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/cors.go b/vendor/k8s.io/apiserver/pkg/server/filters/cors.go index 2c6e66ed6ba6d..96ff58dc7c889 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/cors.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/cors.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) // TODO: use restful.CrossOriginResourceSharing @@ -79,7 +79,7 @@ func WithCORS(handler http.Handler, allowedOriginPatterns []string, allowedMetho func allowedOriginRegexps(allowedOrigins []string) []*regexp.Regexp { res, err := compileRegexps(allowedOrigins) if err != nil { - glog.Fatalf("Invalid CORS allowed origin, --cors-allowed-origins flag was set to %v - %v", strings.Join(allowedOrigins, ","), err) + klog.Fatalf("Invalid CORS allowed origin, --cors-allowed-origins flag was set to %v - %v", strings.Join(allowedOrigins, ","), err) } return res } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go index 78700c33a8862..8818cb5633f38 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -28,7 +28,7 @@ import ( "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -45,9 +45,9 @@ const ( var nonMutatingRequestVerbs = sets.NewString("get", "list", "watch") func handleError(w http.ResponseWriter, r *http.Request, err error) { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "Internal Server Error: %#v", r.RequestURI) - glog.Errorf(err.Error()) + errorMsg 
:= fmt.Sprintf("Internal Server Error: %#v", r.RequestURI) + http.Error(w, errorMsg, http.StatusInternalServerError) + klog.Errorf(err.Error()) } // requestWatermark is used to trak maximal usage of inflight requests. diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go index 7d37ed05a3ded..adb179f82357f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -23,6 +23,7 @@ import ( "fmt" "net" "net/http" + "runtime" "sync" "time" @@ -91,16 +92,23 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - result := make(chan interface{}) + errCh := make(chan interface{}) tw := newTimeoutWriter(w) go func() { defer func() { - result <- recover() + err := recover() + if err != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err = fmt.Sprintf("%v\n%s", err, buf) + } + errCh <- err }() t.handler.ServeHTTP(tw, r) }() select { - case err := <-result: + case err := <-errCh: if err != nil { panic(err) } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go b/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go index 38742ffd9a212..0a75845611dc0 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/wrap.go @@ -18,9 +18,8 @@ package filters import ( "net/http" - "runtime/debug" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/server/httplog" @@ -28,10 +27,16 @@ import ( // WithPanicRecovery wraps an http Handler to recover and log panics. func WithPanicRecovery(handler http.Handler) http.Handler { + return withPanicRecovery(handler, func(w http.ResponseWriter, req *http.Request, err interface{}) { + http.Error(w, "This request caused apiserver to panic. Look in the logs for details.", http.StatusInternalServerError) + klog.Errorf("apiserver panic'd on %v %v", req.Method, req.RequestURI) + }) +} + +func withPanicRecovery(handler http.Handler, crashHandler func(http.ResponseWriter, *http.Request, interface{})) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { defer runtime.HandleCrash(func(err interface{}) { - http.Error(w, "This request caused apiserver to panic. 
Look in the logs for details.", http.StatusInternalServerError) - glog.Errorf("apiserver panic'd on %v %v: %v\n%s\n", req.Method, req.RequestURI, err, debug.Stack()) + crashHandler(w, req, err) }) logger := httplog.NewLogged(req, &w) diff --git a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go index b6e500c612312..011de3c7061a8 100644 --- a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -19,13 +19,14 @@ package server import ( "fmt" "net/http" + gpath "path" "strings" "sync" "time" systemd "github.com/coreos/go-systemd/daemon" "github.com/emicklei/go-restful-swagger12" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,8 +43,12 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/routes" + utilopenapi "k8s.io/apiserver/pkg/util/openapi" restclient "k8s.io/client-go/rest" + openapibuilder "k8s.io/kube-openapi/pkg/builder" openapicommon "k8s.io/kube-openapi/pkg/common" + openapiutil "k8s.io/kube-openapi/pkg/util" + openapiproto "k8s.io/kube-openapi/pkg/util/proto" ) // Info about an API group. @@ -153,6 +158,10 @@ type GenericAPIServer struct { // HandlerChainWaitGroup allows you to wait for all chain handlers finish after the server shutdown. HandlerChainWaitGroup *utilwaitgroup.SafeWaitGroup + + // The limit on the request body size that would be accepted and decoded in a write request. + // 0 means no limit. + maxRequestBodyBytes int64 } // DelegationTarget is an interface which allows for composition of API servers with top level handling that works @@ -312,7 +321,7 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) error { s.RunPostStartHooks(stopCh) if _, err := systemd.SdNotify(true, "READY=1\n"); err != nil { - glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err) + klog.Errorf("Unable to send systemd daemon successful start message: %v\n", err) } return nil @@ -320,9 +329,13 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) error { // installAPIResources is a private method for installing the REST storage backing each api groupversionresource func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo) error { + openAPIGroupModels, err := s.getOpenAPIModelsForGroup(apiPrefix, apiGroupInfo) + if err != nil { + return fmt.Errorf("unable to get openapi models for group %v: %v", apiPrefix, err) + } for _, groupVersion := range apiGroupInfo.PrioritizedVersions { if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 { - glog.Warningf("Skipping API %v because it has no resources.", groupVersion) + klog.Warningf("Skipping API %v because it has no resources.", groupVersion) continue } @@ -330,6 +343,8 @@ func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *A if apiGroupInfo.OptionsExternalVersion != nil { apiGroupVersion.OptionsExternalVersion = apiGroupInfo.OptionsExternalVersion } + apiGroupVersion.OpenAPIModels = openAPIGroupModels + apiGroupVersion.MaxRequestBodyBytes = s.maxRequestBodyBytes if err := apiGroupVersion.InstallREST(s.Handler.GoRestfulContainer); err != nil { return fmt.Errorf("unable to setup API %v: %v", apiGroupInfo, err) @@ -427,7 +442,6 @@ func (s *GenericAPIServer) newAPIGroupVersion(apiGroupInfo *APIGroupInfo, 
groupV Admit: s.admissionControl, MinRequestTimeout: s.minRequestTimeout, EnableAPIResponseCompression: s.enableAPIResponseCompression, - OpenAPIConfig: s.openAPIConfig, Authorizer: s.Authorizer, } } @@ -445,3 +459,37 @@ func NewDefaultAPIGroupInfo(group string, scheme *runtime.Scheme, parameterCodec NegotiatedSerializer: codecs, } } + +// getOpenAPIModelsForGroup is a private method for getting the OpenAPI Schemas for each api group +func (s *GenericAPIServer) getOpenAPIModelsForGroup(apiPrefix string, apiGroupInfo *APIGroupInfo) (openapiproto.Models, error) { + if s.openAPIConfig == nil { + return nil, nil + } + pathsToIgnore := openapiutil.NewTrie(s.openAPIConfig.IgnorePrefixes) + // Get the canonical names of every resource we need to build in this api group + resourceNames := make([]string, 0) + for _, groupVersion := range apiGroupInfo.PrioritizedVersions { + for resource, storage := range apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version] { + path := gpath.Join(apiPrefix, groupVersion.Group, groupVersion.Version, resource) + if !pathsToIgnore.HasPrefix(path) { + kind, err := genericapi.GetResourceKind(groupVersion, storage, apiGroupInfo.Scheme) + if err != nil { + return nil, err + } + sampleObject, err := apiGroupInfo.Scheme.New(kind) + if err != nil { + return nil, err + } + name := openapiutil.GetCanonicalTypeName(sampleObject) + resourceNames = append(resourceNames, name) + } + } + } + + // Build the openapi definitions for those resources and convert it to proto models + openAPISpec, err := openapibuilder.BuildOpenAPIDefinitionsForResources(s.openAPIConfig, resourceNames...) + if err != nil { + return nil, err + } + return utilopenapi.ToProtoModels(openAPISpec) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/handler.go b/vendor/k8s.io/apiserver/pkg/server/handler.go index e4e7d9aee06b6..0277bac7788fd 100644 --- a/vendor/k8s.io/apiserver/pkg/server/handler.go +++ b/vendor/k8s.io/apiserver/pkg/server/handler.go @@ -25,7 +25,7 @@ import ( "strings" "github.com/emicklei/go-restful" - "github.com/golang/glog" + "k8s.io/klog" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -130,7 +130,7 @@ func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) { // normally these are passed to the nonGoRestfulMux, but if discovery is enabled, it will go directly. 
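Note: the director dispatch in the handler.go hunk around this point only hands a request to go-restful on an exact root-path match or a prefix match that ends on a path boundary, because a bare prefix check on "/apis" would capture every API path. A self-contained sketch of that boundary check; the sample paths are illustrative:

package main

import (
	"fmt"
	"strings"
)

// servedByRoot reports whether path should be dispatched to the web service
// rooted at root: either an exact match, or a prefix match that ends on a
// path boundary ('/'), so "/apis" does not capture "/apisextensions".
func servedByRoot(root, path string) bool {
	if !strings.HasPrefix(path, root) {
		return false
	}
	return len(path) == len(root) || path[len(root)] == '/'
}

func main() {
	fmt.Println(servedByRoot("/apis", "/apis"))                     // true
	fmt.Println(servedByRoot("/apis", "/apis/apps/v1/deployments")) // true
	fmt.Println(servedByRoot("/apis", "/apisextensions"))           // false
}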
// We can't rely on a prefix match since /apis matches everything (see the big comment on Director above) if path == "/apis" || path == "/apis/" { - glog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) + klog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) // don't use servemux here because gorestful servemuxes get messed up when removing webservices // TODO fix gorestful, remove TPRs, or stop using gorestful d.goRestfulContainer.Dispatch(w, req) @@ -140,7 +140,7 @@ func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) { case strings.HasPrefix(path, ws.RootPath()): // ensure an exact match or a path boundary match if len(path) == len(ws.RootPath()) || path[len(ws.RootPath())] == '/' { - glog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) + klog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) // don't use servemux here because gorestful servemuxes get messed up when removing webservices // TODO fix gorestful, remove TPRs, or stop using gorestful d.goRestfulContainer.Dispatch(w, req) @@ -150,7 +150,7 @@ func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) { } // if we didn't find a match, then we just skip gorestful altogether - glog.V(5).Infof("%v: %v %q satisfied by nonGoRestful", d.name, req.Method, path) + klog.V(5).Infof("%v: %v %q satisfied by nonGoRestful", d.name, req.Method, path) d.nonGoRestfulMux.ServeHTTP(w, req) } @@ -165,7 +165,7 @@ func logStackOnRecover(s runtime.NegotiatedSerializer, panicReason interface{}, } buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) } - glog.Errorln(buffer.String()) + klog.Errorln(buffer.String()) headers := http.Header{} if ct := w.Header().Get("Content-Type"); len(ct) > 0 { diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD.bazel index 2046f82aed7cb..30bc13dff4126 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD.bazel @@ -10,7 +10,8 @@ go_library( importpath = "k8s.io/apiserver/pkg/server/healthz", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go index 24fafcefa0f3b..17d85fbe637ed 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -25,8 +25,9 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" ) @@ -76,7 +77,7 @@ func (l *log) Check(_ *http.Request) error { l.startOnce.Do(func() { l.lastVerified.Store(time.Now()) go wait.Forever(func() { - glog.Flush() + klog.Flush() l.lastVerified.Store(time.Now()) }, time.Minute) }) @@ -108,11 +109,11 @@ func InstallHandler(mux mux, checks ...HealthzChecker) { // result in a panic. func InstallPathHandler(mux mux, path string, checks ...HealthzChecker) { if len(checks) == 0 { - glog.V(5).Info("No default health checks specified. 
Installing the ping handler.") + klog.V(5).Info("No default health checks specified. Installing the ping handler.") checks = []HealthzChecker{PingHealthz} } - glog.V(5).Info("Installing healthz checkers:", strings.Join(checkerNames(checks...), ", ")) + klog.V(5).Info("Installing healthz checkers:", formatQuoted(checkerNames(checks...)...)) mux.Handle(path, handleRootHealthz(checks...)) for _, check := range checks { @@ -141,22 +142,43 @@ func (c *healthzCheck) Check(r *http.Request) error { return c.check(r) } +// getExcludedChecks extracts the health check names to be excluded from the query param +func getExcludedChecks(r *http.Request) sets.String { + checks, found := r.URL.Query()["exclude"] + if found { + return sets.NewString(checks...) + } + return sets.NewString() +} + // handleRootHealthz returns an http.HandlerFunc that serves the provided checks. func handleRootHealthz(checks ...HealthzChecker) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { failed := false + excluded := getExcludedChecks(r) var verboseOut bytes.Buffer for _, check := range checks { + // no-op the check if we've specified we want to exclude the check + if excluded.Has(check.Name()) { + excluded.Delete(check.Name()) + fmt.Fprintf(&verboseOut, "[+]%v excluded: ok\n", check.Name()) + continue + } if err := check.Check(r); err != nil { // don't include the error since this endpoint is public. If someone wants more detail // they should have explicit permission to the detailed checks. - glog.V(6).Infof("healthz check %v failed: %v", check.Name(), err) + klog.V(6).Infof("healthz check %v failed: %v", check.Name(), err) fmt.Fprintf(&verboseOut, "[-]%v failed: reason withheld\n", check.Name()) failed = true } else { fmt.Fprintf(&verboseOut, "[+]%v ok\n", check.Name()) } } + if excluded.Len() > 0 { + fmt.Fprintf(&verboseOut, "warn: some health checks cannot be excluded: no matches for %v\n", formatQuoted(excluded.List()...)) + klog.Warningf("cannot exclude some health checks, no health checks are installed matching %v", + formatQuoted(excluded.List()...)) + } // always be verbose on failure if failed { http.Error(w, fmt.Sprintf("%vhealthz check failed", verboseOut.String()), http.StatusInternalServerError) @@ -187,14 +209,20 @@ func adaptCheckToHandler(c func(r *http.Request) error) http.HandlerFunc { // checkerNames returns the names of the checks in the same order as passed in. func checkerNames(checks ...HealthzChecker) []string { - if len(checks) > 0 { - // accumulate the names of checks for printing them out. - checkerNames := make([]string, 0, len(checks)) - for _, check := range checks { - // quote the Name so we can disambiguate - checkerNames = append(checkerNames, fmt.Sprintf("%q", check.Name())) - } - return checkerNames + // accumulate the names of checks for printing them out. + checkerNames := make([]string, 0, len(checks)) + for _, check := range checks { + checkerNames = append(checkerNames, check.Name()) } - return nil + return checkerNames +} + +// formatQuoted returns a formatted string of the health check names, +// preserving the order passed in. 
+func formatQuoted(names ...string) string { + quoted := make([]string, 0, len(names)) + for _, name := range names { + quoted = append(quoted, fmt.Sprintf("%q", name)) + } + return strings.Join(quoted, ",") } diff --git a/vendor/k8s.io/apiserver/pkg/server/hooks.go b/vendor/k8s.io/apiserver/pkg/server/hooks.go index ccf8ee17ad02d..921255218bcc5 100644 --- a/vendor/k8s.io/apiserver/pkg/server/hooks.go +++ b/vendor/k8s.io/apiserver/pkg/server/hooks.go @@ -21,7 +21,7 @@ import ( "fmt" "net/http" - "github.com/golang/glog" + "k8s.io/klog" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -101,7 +101,7 @@ func (s *GenericAPIServer) AddPostStartHook(name string, hook PostStartHookFunc) // AddPostStartHookOrDie allows you to add a PostStartHook, but dies on failure func (s *GenericAPIServer) AddPostStartHookOrDie(name string, hook PostStartHookFunc) { if err := s.AddPostStartHook(name, hook); err != nil { - glog.Fatalf("Error registering PostStartHook %q: %v", name, err) + klog.Fatalf("Error registering PostStartHook %q: %v", name, err) } } @@ -132,7 +132,7 @@ func (s *GenericAPIServer) AddPreShutdownHook(name string, hook PreShutdownHookF // AddPreShutdownHookOrDie allows you to add a PostStartHook, but dies on failure func (s *GenericAPIServer) AddPreShutdownHookOrDie(name string, hook PreShutdownHookFunc) { if err := s.AddPreShutdownHook(name, hook); err != nil { - glog.Fatalf("Error registering PreShutdownHook %q: %v", name, err) + klog.Fatalf("Error registering PreShutdownHook %q: %v", name, err) } } @@ -185,7 +185,7 @@ func runPostStartHook(name string, entry postStartHookEntry, context PostStartHo }() // if the hook intentionally wants to kill server, let it. if err != nil { - glog.Fatalf("PostStartHook %q failed: %v", name, err) + klog.Fatalf("PostStartHook %q failed: %v", name, err) } close(entry.done) } diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD.bazel index 0f06d2e833c0b..633174f855643 100644 --- a/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD.bazel @@ -9,5 +9,5 @@ go_library( importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/server/httplog", importpath = "k8s.io/apiserver/pkg/server/httplog", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go index f8a8a5307aaab..dcdba69225d82 100644 --- a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go +++ b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -24,7 +24,7 @@ import ( "runtime" "time" - "github.com/golang/glog" + "k8s.io/klog" ) // StacktracePred returns true if a stacktrace should be logged for this status. @@ -61,7 +61,7 @@ type passthroughLogger struct{} // Addf logs info immediately. func (passthroughLogger) Addf(format string, data ...interface{}) { - glog.V(2).Info(fmt.Sprintf(format, data...)) + klog.V(2).Info(fmt.Sprintf(format, data...)) } // DefaultStacktracePred is the default implementation of StacktracePred. 
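The `exclude` query parameter added to the healthz handler above skips named checks and warns about any leftover names that match no installed check. A minimal stdlib-only sketch of that skip-and-warn behavior (plain net/url and a map instead of k8s.io/apimachinery sets; check names are invented for illustration):

package main

import (
	"fmt"
	"net/url"
)

// excludedSet mirrors getExcludedChecks above: every value of the repeated
// "exclude" query parameter becomes a check name to skip.
func excludedSet(q url.Values) map[string]bool {
	s := map[string]bool{}
	for _, name := range q["exclude"] {
		s[name] = true
	}
	return s
}

func main() {
	u, _ := url.Parse("/healthz?exclude=etcd&exclude=log")
	excluded := excludedSet(u.Query())
	for _, check := range []string{"ping", "log", "etcd"} {
		if excluded[check] {
			delete(excluded, check)
			fmt.Printf("[+]%v excluded: ok\n", check)
			continue
		}
		fmt.Printf("[+]%v ok\n", check)
	}
	// Anything left over had no matching installed check and is only warned about.
	for name := range excluded {
		fmt.Printf("warn: no health check named %q is installed\n", name)
	}
}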
@@ -143,11 +143,11 @@ func (rl *respLogger) Addf(format string, data ...interface{}) { // Log is intended to be called once at the end of your request handler, via defer func (rl *respLogger) Log() { latency := time.Since(rl.startTime) - if glog.V(3) { + if klog.V(3) { if !rl.hijacked { - glog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) %v%v%v [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo, rl.req.UserAgent(), rl.req.RemoteAddr)) + klog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) %v%v%v [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo, rl.req.UserAgent(), rl.req.RemoteAddr)) } else { - glog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) hijacked [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.req.UserAgent(), rl.req.RemoteAddr)) + klog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) hijacked [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.req.UserAgent(), rl.req.RemoteAddr)) } } } @@ -173,8 +173,8 @@ func (rl *respLogger) Write(b []byte) (int, error) { func (rl *respLogger) Flush() { if flusher, ok := rl.w.(http.Flusher); ok { flusher.Flush() - } else if glog.V(2) { - glog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w)) + } else if klog.V(2) { + klog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w)) } } diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/mux/BUILD.bazel index 9e20dd393b4d8..5327b7d56dd60 100644 --- a/vendor/k8s.io/apiserver/pkg/server/mux/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/server/mux/BUILD.bazel @@ -10,8 +10,8 @@ go_library( importpath = "k8s.io/apiserver/pkg/server/mux", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go b/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go index 2f0eb7aa5b224..16857cc8a6b84 100644 --- a/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go +++ b/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go @@ -25,7 +25,7 @@ import ( "sync" "sync/atomic" - "github.com/golang/glog" + "k8s.io/klog" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -237,20 +237,20 @@ func (m *PathRecorderMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { // ServeHTTP makes it an http.Handler func (h *pathHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if exactHandler, ok := h.pathToHandler[r.URL.Path]; ok { - glog.V(5).Infof("%v: %q satisfied by exact match", h.muxName, r.URL.Path) + klog.V(5).Infof("%v: %q satisfied by exact match", h.muxName, r.URL.Path) exactHandler.ServeHTTP(w, r) return } for _, prefixHandler := range h.prefixHandlers { if strings.HasPrefix(r.URL.Path, prefixHandler.prefix) { - glog.V(5).Infof("%v: %q satisfied by prefix %v", h.muxName, r.URL.Path, prefixHandler.prefix) + klog.V(5).Infof("%v: %q satisfied by prefix %v", h.muxName, r.URL.Path, prefixHandler.prefix) prefixHandler.handler.ServeHTTP(w, r) return } } - glog.V(5).Infof("%v: %q satisfied by NotFoundHandler", h.muxName, r.URL.Path) + klog.V(5).Infof("%v: %q satisfied by NotFoundHandler", h.muxName, r.URL.Path) h.notFoundHandler.ServeHTTP(w, r) } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/BUILD.bazel 
b/vendor/k8s.io/apiserver/pkg/server/options/BUILD.bazel index 2429fc4960cc5..95117156734c5 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/server/options/BUILD.bazel @@ -12,17 +12,18 @@ go_library( "deprecated_insecure_serving.go", "doc.go", "etcd.go", + "events.go", "feature.go", "recommended.go", "server_run_options.go", "serving.go", "serving_with_loopback.go", + "webhook.go", ], importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/server/options", importpath = "k8s.io/apiserver/pkg/server/options", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/gopkg.in/natefinch/lumberjack.v2:go_default_library", @@ -44,6 +45,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library", @@ -65,15 +67,20 @@ go_library( "//vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//vendor/k8s.io/apiserver/plugin/pkg/audit/buffered:go_default_library", + "//vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic:go_default_library", + "//vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced:go_default_library", "//vendor/k8s.io/apiserver/plugin/pkg/audit/log:go_default_library", "//vendor/k8s.io/apiserver/plugin/pkg/audit/truncate:go_default_library", "//vendor/k8s.io/apiserver/plugin/pkg/audit/webhook:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/options/OWNERS b/vendor/k8s.io/apiserver/pkg/server/options/OWNERS index 6371177fcfc06..9d6b439de8df1 100755 --- a/vendor/k8s.io/apiserver/pkg/server/options/OWNERS +++ b/vendor/k8s.io/apiserver/pkg/server/options/OWNERS @@ -9,7 +9,6 @@ reviewers: - soltysh - dims - cjcullen -- ericchiang - ping035627 - xiangpengzhao - enj diff --git a/vendor/k8s.io/apiserver/pkg/server/options/audit.go b/vendor/k8s.io/apiserver/pkg/server/options/audit.go index 71476c229a080..402a703ac6137 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/audit.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/audit.go @@ -23,21 +23,31 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/spf13/pflag" "gopkg.in/natefinch/lumberjack.v2" + "k8s.io/klog" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" + auditinternal 
"k8s.io/apiserver/pkg/apis/audit" auditv1 "k8s.io/apiserver/pkg/apis/audit/v1" auditv1alpha1 "k8s.io/apiserver/pkg/apis/audit/v1alpha1" auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/server" + utilfeature "k8s.io/apiserver/pkg/util/feature" pluginbuffered "k8s.io/apiserver/plugin/pkg/audit/buffered" + plugindynamic "k8s.io/apiserver/plugin/pkg/audit/dynamic" + pluginenforced "k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced" pluginlog "k8s.io/apiserver/plugin/pkg/audit/log" plugintruncate "k8s.io/apiserver/plugin/pkg/audit/truncate" pluginwebhook "k8s.io/apiserver/plugin/pkg/audit/webhook" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" ) const ( @@ -54,6 +64,9 @@ func appendBackend(existing, newBackend audit.Backend) audit.Backend { if existing == nil { return newBackend } + if newBackend == nil { + return existing + } return audit.Union(existing, newBackend) } @@ -65,6 +78,7 @@ type AuditOptions struct { // Plugin options LogOptions AuditLogOptions WebhookOptions AuditWebhookOptions + DynamicOptions AuditDynamicOptions } const ( @@ -76,12 +90,17 @@ const ( // a set of events. This causes requests to the API server to wait for the // flush before sending a response. ModeBlocking = "blocking" + // ModeBlockingStrict is the same as ModeBlocking, except when there is + // a failure during audit logging at RequestReceived stage, the whole + // request to apiserver will fail. + ModeBlockingStrict = "blocking-strict" ) // AllowedModes is the modes known for audit backends. var AllowedModes = []string{ ModeBatch, ModeBlocking, + ModeBlockingStrict, } type AuditBatchOptions struct { @@ -129,6 +148,11 @@ type AuditWebhookOptions struct { GroupVersionString string } +type AuditDynamicOptions struct { + // Enabled tells whether the dynamic audit capability is enabled. + Enabled bool +} + func NewAuditOptions() *AuditOptions { return &AuditOptions{ WebhookOptions: AuditWebhookOptions{ @@ -137,9 +161,8 @@ func NewAuditOptions() *AuditOptions { Mode: ModeBatch, BatchConfig: defaultWebhookBatchConfig(), }, - TruncateOptions: NewAuditTruncateOptions(), - // TODO(audit): use v1 API in release 1.13 - GroupVersionString: "audit.k8s.io/v1beta1", + TruncateOptions: NewAuditTruncateOptions(), + GroupVersionString: "audit.k8s.io/v1", }, LogOptions: AuditLogOptions{ Format: pluginlog.FormatJson, @@ -147,9 +170,11 @@ func NewAuditOptions() *AuditOptions { Mode: ModeBlocking, BatchConfig: defaultLogBatchConfig(), }, - TruncateOptions: NewAuditTruncateOptions(), - // TODO(audit): use v1 API in release 1.13 - GroupVersionString: "audit.k8s.io/v1beta1", + TruncateOptions: NewAuditTruncateOptions(), + GroupVersionString: "audit.k8s.io/v1", + }, + DynamicOptions: AuditDynamicOptions{ + Enabled: false, }, } } @@ -173,6 +198,7 @@ func (o *AuditOptions) Validate() []error { var allErrors []error allErrors = append(allErrors, o.LogOptions.Validate()...) allErrors = append(allErrors, o.WebhookOptions.Validate()...) + allErrors = append(allErrors, o.DynamicOptions.Validate()...) 
return allErrors } @@ -252,44 +278,102 @@ func (o *AuditOptions) AddFlags(fs *pflag.FlagSet) { o.WebhookOptions.AddFlags(fs) o.WebhookOptions.BatchOptions.AddFlags(pluginwebhook.PluginName, fs) o.WebhookOptions.TruncateOptions.AddFlags(pluginwebhook.PluginName, fs) + o.DynamicOptions.AddFlags(fs) } -func (o *AuditOptions) ApplyTo(c *server.Config) error { +func (o *AuditOptions) ApplyTo( + c *server.Config, + kubeClientConfig *restclient.Config, + informers informers.SharedInformerFactory, + processInfo *ProcessInfo, + webhookOptions *WebhookOptions, +) error { if o == nil { return nil } + if c == nil { + return fmt.Errorf("server config must be non-nil") + } - // Apply advanced options. - // 1. Apply generic options. - if err := o.applyTo(c); err != nil { + // 1. Build policy checker + checker, err := o.newPolicyChecker() + if err != nil { return err } - // 2. Apply plugin options. - if err := o.LogOptions.applyTo(c); err != nil { - return err + // 2. Build log backend + var logBackend audit.Backend + if w := o.LogOptions.getWriter(); w != nil { + if checker == nil { + klog.V(2).Info("No audit policy file provided, no events will be recorded for log backend") + } else { + logBackend = o.LogOptions.newBackend(w) + } + } + + // 3. Build webhook backend + var webhookBackend audit.Backend + if o.WebhookOptions.enabled() { + if checker == nil { + klog.V(2).Info("No audit policy file provided, no events will be recorded for webhook backend") + } else { + webhookBackend, err = o.WebhookOptions.newUntruncatedBackend() + if err != nil { + return err + } + } } - if err := o.WebhookOptions.applyTo(c); err != nil { + + groupVersion, err := schema.ParseGroupVersion(o.WebhookOptions.GroupVersionString) + if err != nil { return err } - if c.AuditBackend != nil && c.AuditPolicyChecker == nil { - glog.V(2).Info("No audit policy file provided for AdvancedAuditing, no events will be recorded.") + // 4. Apply dynamic options. + var dynamicBackend audit.Backend + if o.DynamicOptions.enabled() { + // if dynamic is enabled the webhook and log backends need to be wrapped in an enforced backend with the static policy + if webhookBackend != nil { + webhookBackend = pluginenforced.NewBackend(webhookBackend, checker) + } + if logBackend != nil { + logBackend = pluginenforced.NewBackend(logBackend, checker) + } + // build dynamic backend + dynamicBackend, checker, err = o.DynamicOptions.newBackend(c.ExternalAddress, kubeClientConfig, informers, processInfo, webhookOptions) + if err != nil { + return err + } + // union dynamic and webhook backends so that truncate options can be applied to both + dynamicBackend = appendBackend(webhookBackend, dynamicBackend) + dynamicBackend = o.WebhookOptions.TruncateOptions.wrapBackend(dynamicBackend, groupVersion) + } else if webhookBackend != nil { + // if only webhook is enabled wrap it in the truncate options + dynamicBackend = o.WebhookOptions.TruncateOptions.wrapBackend(webhookBackend, groupVersion) + } + + // 5. Set the policy checker + c.AuditPolicyChecker = checker + + // 6. 
Join the log backend with the webhooks + c.AuditBackend = appendBackend(logBackend, dynamicBackend) + + if c.AuditBackend != nil { + klog.V(2).Infof("Using audit backend: %s", c.AuditBackend) } return nil } -func (o *AuditOptions) applyTo(c *server.Config) error { +func (o *AuditOptions) newPolicyChecker() (policy.Checker, error) { if o.PolicyFile == "" { - return nil + return nil, nil } p, err := policy.LoadPolicyFromFile(o.PolicyFile) if err != nil { - return fmt.Errorf("loading audit policy file: %v", err) + return nil, fmt.Errorf("loading audit policy file: %v", err) } - c.AuditPolicyChecker = policy.NewChecker(p) - return nil + return policy.NewChecker(p), nil } func (o *AuditBatchOptions) AddFlags(pluginName string, fs *pflag.FlagSet) { @@ -315,10 +399,26 @@ func (o *AuditBatchOptions) AddFlags(pluginName string, fs *pflag.FlagSet) { "moment if ThrottleQPS was not utilized before. Only used in batch mode.") } +type ignoreErrorsBackend struct { + audit.Backend +} + +func (i *ignoreErrorsBackend) ProcessEvents(ev ...*auditinternal.Event) bool { + i.Backend.ProcessEvents(ev...) + return true +} + +func (i *ignoreErrorsBackend) String() string { + return fmt.Sprintf("ignoreErrors<%s>", i.Backend) +} + func (o *AuditBatchOptions) wrapBackend(delegate audit.Backend) audit.Backend { - if o.Mode == ModeBlocking { + if o.Mode == ModeBlockingStrict { return delegate } + if o.Mode == ModeBlocking { + return &ignoreErrorsBackend{Backend: delegate} + } return pluginbuffered.NewBackend(delegate, o.BatchConfig) } @@ -438,15 +538,12 @@ func (o *AuditLogOptions) getWriter() io.Writer { return w } -func (o *AuditLogOptions) applyTo(c *server.Config) error { - if w := o.getWriter(); w != nil { - groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString) - log := pluginlog.NewBackend(w, o.Format, groupVersion) - log = o.BatchOptions.wrapBackend(log) - log = o.TruncateOptions.wrapBackend(log, groupVersion) - c.AuditBackend = appendBackend(c.AuditBackend, log) - } - return nil +func (o *AuditLogOptions) newBackend(w io.Writer) audit.Backend { + groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString) + log := pluginlog.NewBackend(w, o.Format, groupVersion) + log = o.BatchOptions.wrapBackend(log) + log = o.TruncateOptions.wrapBackend(log, groupVersion) + return log } func (o *AuditWebhookOptions) AddFlags(fs *pflag.FlagSet) { @@ -485,20 +582,76 @@ func (o *AuditWebhookOptions) enabled() bool { return o != nil && o.ConfigFile != "" } -func (o *AuditWebhookOptions) applyTo(c *server.Config) error { - if !o.enabled() { - return nil - } - +// newUntruncatedBackend returns a webhook backend without the truncate options applied +// this is done so that the same trucate backend can wrap both the webhook and dynamic backends +func (o *AuditWebhookOptions) newUntruncatedBackend() (audit.Backend, error) { groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString) webhook, err := pluginwebhook.NewBackend(o.ConfigFile, groupVersion, o.InitialBackoff) if err != nil { - return fmt.Errorf("initializing audit webhook: %v", err) + return nil, fmt.Errorf("initializing audit webhook: %v", err) } webhook = o.BatchOptions.wrapBackend(webhook) - webhook = o.TruncateOptions.wrapBackend(webhook, groupVersion) - c.AuditBackend = appendBackend(c.AuditBackend, webhook) - return nil + return webhook, nil +} + +func (o *AuditDynamicOptions) AddFlags(fs *pflag.FlagSet) { + fs.BoolVar(&o.Enabled, "audit-dynamic-configuration", o.Enabled, + "Enables dynamic audit configuration. 
This feature also requires the DynamicAuditing feature flag") +} + +func (o *AuditDynamicOptions) enabled() bool { + return o.Enabled && utilfeature.DefaultFeatureGate.Enabled(features.DynamicAuditing) +} + +func (o *AuditDynamicOptions) Validate() []error { + var allErrors []error + if o.Enabled && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicAuditing) { + allErrors = append(allErrors, fmt.Errorf("--audit-dynamic-configuration set, but DynamicAuditing feature gate is not enabled")) + } + return allErrors +} + +func (o *AuditDynamicOptions) newBackend( + hostname string, + kubeClientConfig *restclient.Config, + informers informers.SharedInformerFactory, + processInfo *ProcessInfo, + webhookOptions *WebhookOptions, +) (audit.Backend, policy.Checker, error) { + if err := validateProcessInfo(processInfo); err != nil { + return nil, nil, err + } + clientset, err := kubernetes.NewForConfig(kubeClientConfig) + if err != nil { + return nil, nil, err + } + if webhookOptions == nil { + webhookOptions = NewWebhookOptions() + } + checker := policy.NewDynamicChecker() + informer := informers.Auditregistration().V1alpha1().AuditSinks() + eventSink := &v1core.EventSinkImpl{Interface: clientset.CoreV1().Events(processInfo.Namespace)} + + dc := &plugindynamic.Config{ + Informer: informer, + BufferedConfig: plugindynamic.NewDefaultWebhookBatchConfig(), + EventConfig: plugindynamic.EventConfig{ + Sink: eventSink, + Source: corev1.EventSource{ + Component: processInfo.Name, + Host: hostname, + }, + }, + WebhookConfig: plugindynamic.WebhookConfig{ + AuthInfoResolverWrapper: webhookOptions.AuthInfoResolverWrapper, + ServiceResolver: webhookOptions.ServiceResolver, + }, + } + backend, err := plugindynamic.NewBackend(dc) + if err != nil { + return nil, nil, fmt.Errorf("could not create dynamic audit backend: %v", err) + } + return backend, checker, nil } // defaultWebhookBatchConfig returns the default BatchConfig used by the Webhook backend. diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authentication.go b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go index 5c8209c35d0b3..877dc9133a7e6 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/authentication.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go @@ -22,8 +22,8 @@ import ( "io/ioutil" "time" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -117,7 +117,12 @@ type DelegatingAuthenticationOptions struct { ClientCert ClientCertAuthenticationOptions RequestHeader RequestHeaderAuthenticationOptions + // SkipInClusterLookup indicates missing authentication configuration should not be retrieved from the cluster configmap SkipInClusterLookup bool + + // TolerateInClusterLookupFailure indicates failures to look up authentication configuration from the cluster configmap should not be fatal. + // Setting this can result in an authenticator that will reject all requests. 
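In the audit.go hunk above, plain blocking mode now wraps its delegate in ignoreErrorsBackend so a failing sink cannot fail the API request, while blocking-strict leaves the delegate untouched. A rough sketch of that decorator pattern, using a deliberately simplified Backend interface (string events instead of *auditinternal.Event, invented type names, no batch/buffered path):

package main

import "fmt"

// Backend is a trimmed-down stand-in for audit.Backend; the real interface
// carries *auditinternal.Event values and also has Run/Shutdown methods.
type Backend interface {
	ProcessEvents(events ...string) bool
	String() string
}

type printBackend struct{}

func (printBackend) ProcessEvents(events ...string) bool {
	fmt.Println("recording", events)
	return false // pretend the sink failed
}
func (printBackend) String() string { return "print" }

// ignoreErrors mirrors ignoreErrorsBackend above: it forwards events but
// always reports success, hiding sink failures from the caller.
type ignoreErrors struct{ Backend }

func (i ignoreErrors) ProcessEvents(events ...string) bool {
	i.Backend.ProcessEvents(events...)
	return true
}
func (i ignoreErrors) String() string { return fmt.Sprintf("ignoreErrors<%s>", i.Backend) }

// wrapBlocking mirrors the two blocking branches of AuditBatchOptions.wrapBackend:
// strict mode keeps delegate errors, plain blocking hides them.
func wrapBlocking(mode string, delegate Backend) Backend {
	if mode == "blocking-strict" {
		return delegate
	}
	return ignoreErrors{Backend: delegate}
}

func main() {
	b := wrapBlocking("blocking", printBackend{})
	fmt.Println(b, "ok:", b.ProcessEvents("request-received"))
}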
+ TolerateInClusterLookupFailure bool } func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions { @@ -160,6 +165,9 @@ func (s *DelegatingAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { fs.BoolVar(&s.SkipInClusterLookup, "authentication-skip-lookup", s.SkipInClusterLookup, ""+ "If false, the authentication-kubeconfig will be used to lookup missing authentication "+ "configuration from the cluster.") + fs.BoolVar(&s.TolerateInClusterLookupFailure, "authentication-tolerate-lookup-failure", s.TolerateInClusterLookupFailure, ""+ + "If true, failures to look up missing authentication configuration from the cluster are not considered fatal. "+ + "Note that this can result in authentication that treats all requests as anonymous.") } func (s *DelegatingAuthenticationOptions) ApplyTo(c *server.AuthenticationInfo, servingInfo *server.SecureServingInfo, openAPIConfig *openapicommon.Config) error { @@ -187,14 +195,22 @@ func (s *DelegatingAuthenticationOptions) ApplyTo(c *server.AuthenticationInfo, if !s.SkipInClusterLookup { err := s.lookupMissingConfigInCluster(client) if err != nil { - return err + if s.TolerateInClusterLookupFailure { + klog.Warningf("Error looking up in-cluster authentication configuration: %v", err) + klog.Warningf("Continuing without authentication configuration. This may treat all requests as anonymous.") + klog.Warningf("To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false") + } else { + return err + } } } // configure AuthenticationInfo config + cfg.ClientCAFile = s.ClientCert.ClientCA if err = c.ApplyClientCert(s.ClientCert.ClientCA, servingInfo); err != nil { return fmt.Errorf("unable to load client CA file: %v", err) } + cfg.RequestHeaderConfig = s.RequestHeader.ToAuthenticationRequestHeaderConfig() if err = c.ApplyClientCert(s.RequestHeader.ClientCAFile, servingInfo); err != nil { return fmt.Errorf("unable to load client CA file: %v", err) @@ -230,10 +246,10 @@ func (s *DelegatingAuthenticationOptions) lookupMissingConfigInCluster(client ku } if client == nil { if len(s.ClientCert.ClientCA) == 0 { - glog.Warningf("No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + klog.Warningf("No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) } if len(s.RequestHeader.ClientCAFile) == 0 { - glog.Warningf("No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + klog.Warningf("No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) } return nil } @@ -243,7 +259,7 @@ func (s *DelegatingAuthenticationOptions) lookupMissingConfigInCluster(client ku case errors.IsNotFound(err): // ignore, authConfigMap is nil now case errors.IsForbidden(err): - glog.Warningf("Unable to get configmap/%s in %s. Usually fixed by "+ + klog.Warningf("Unable to get configmap/%s in %s. 
Usually fixed by "+ "'kubectl create rolebinding -n %s ROLE_NAME --role=%s --serviceaccount=YOUR_NS:YOUR_SA'", authenticationConfigMapName, authenticationConfigMapNamespace, authenticationConfigMapNamespace, authenticationRoleName) return err @@ -262,7 +278,7 @@ func (s *DelegatingAuthenticationOptions) lookupMissingConfigInCluster(client ku } } if len(s.ClientCert.ClientCA) == 0 { - glog.Warningf("Cluster doesn't provide client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + klog.Warningf("Cluster doesn't provide client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) } } @@ -277,7 +293,7 @@ func (s *DelegatingAuthenticationOptions) lookupMissingConfigInCluster(client ku } } if len(s.RequestHeader.ClientCAFile) == 0 { - glog.Warningf("Cluster doesn't provide requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + klog.Warningf("Cluster doesn't provide requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) } } @@ -368,7 +384,7 @@ func (s *DelegatingAuthenticationOptions) getClient() (kubernetes.Interface, err clientConfig, err = rest.InClusterConfig() if err != nil && s.RemoteKubeConfigFileOptional { if err != rest.ErrNotInCluster { - glog.Warningf("failed to read in-cluster kubeconfig for delegated authentication: %v", err) + klog.Warningf("failed to read in-cluster kubeconfig for delegated authentication: %v", err) } return nil, nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authorization.go b/vendor/k8s.io/apiserver/pkg/server/options/authorization.go index 7c65dd3918486..5d81d9e86604d 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/authorization.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/authorization.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" @@ -56,6 +56,9 @@ type DelegatingAuthorizationOptions struct { // AlwaysAllowPaths are HTTP paths which are excluded from authorization. They can be plain // paths or end in * in which case prefix-match is applied. A leading / is optional. AlwaysAllowPaths []string + + // AlwaysAllowGroups are groups which are allowed to take any actions. In kube, this is system:masters. + AlwaysAllowGroups []string } func NewDelegatingAuthorizationOptions() *DelegatingAuthorizationOptions { @@ -66,6 +69,18 @@ func NewDelegatingAuthorizationOptions() *DelegatingAuthorizationOptions { } } +// WithAlwaysAllowGroups appends the list of paths to AlwaysAllowGroups +func (s *DelegatingAuthorizationOptions) WithAlwaysAllowGroups(groups ...string) *DelegatingAuthorizationOptions { + s.AlwaysAllowGroups = append(s.AlwaysAllowGroups, groups...) + return s +} + +// WithAlwaysAllowPaths appends the list of paths to AlwaysAllowPaths +func (s *DelegatingAuthorizationOptions) WithAlwaysAllowPaths(paths ...string) *DelegatingAuthorizationOptions { + s.AlwaysAllowPaths = append(s.AlwaysAllowPaths, paths...) 
+ return s +} + func (s *DelegatingAuthorizationOptions) Validate() []error { allErrors := []error{} return allErrors @@ -115,6 +130,10 @@ func (s *DelegatingAuthorizationOptions) ApplyTo(c *server.AuthorizationInfo) er func (s *DelegatingAuthorizationOptions) toAuthorizer(client kubernetes.Interface) (authorizer.Authorizer, error) { var authorizers []authorizer.Authorizer + if len(s.AlwaysAllowGroups) > 0 { + authorizers = append(authorizers, authorizerfactory.NewPrivilegedGroups(s.AlwaysAllowGroups...)) + } + if len(s.AlwaysAllowPaths) > 0 { a, err := path.NewAuthorizer(s.AlwaysAllowPaths) if err != nil { @@ -124,7 +143,7 @@ func (s *DelegatingAuthorizationOptions) toAuthorizer(client kubernetes.Interfac } if client == nil { - glog.Warningf("No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.") + klog.Warningf("No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.") } else { cfg := authorizerfactory.DelegatingAuthorizerConfig{ SubjectAccessReviewClient: client.AuthorizationV1beta1().SubjectAccessReviews(), @@ -155,7 +174,7 @@ func (s *DelegatingAuthorizationOptions) getClient() (kubernetes.Interface, erro clientConfig, err = rest.InClusterConfig() if err != nil && s.RemoteKubeConfigFileOptional { if err != rest.ErrNotInCluster { - glog.Warningf("failed to read in-cluster kubeconfig for delegated authorization: %v", err) + klog.Warningf("failed to read in-cluster kubeconfig for delegated authorization: %v", err) } return nil, nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go b/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go index e8e3d7feb3f5c..804f04bca962d 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go @@ -85,11 +85,13 @@ func (s *DeprecatedInsecureServingOptions) AddUnqualifiedFlags(fs *pflag.FlagSet } fs.IPVar(&s.BindAddress, "address", s.BindAddress, - "DEPRECATED: see --bind-address instead.") + "The IP address on which to serve the insecure --port (set to 0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces).") fs.MarkDeprecated("address", "see --bind-address instead.") + fs.Lookup("address").Hidden = false - fs.IntVar(&s.BindPort, "port", s.BindPort, "DEPRECATED: see --secure-port instead.") + fs.IntVar(&s.BindPort, "port", s.BindPort, "The port on which to serve unsecured, unauthenticated access. Set to 0 to disable.") fs.MarkDeprecated("port", "see --secure-port instead.") + fs.Lookup("port").Hidden = false } // ApplyTo adds DeprecatedInsecureServingOptions to the insecureserverinfo amd kube-controller manager configuration. 
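The two builder helpers added to DelegatingAuthorizationOptions above chain naturally at construction time. A usage sketch (the group and paths below are examples; surrounding server wiring is omitted):

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/server/options"
)

func main() {
	// system:masters is always authorized and the listed paths skip
	// SubjectAccessReview entirely, e.g. so probes can reach /healthz.
	authz := options.NewDelegatingAuthorizationOptions().
		WithAlwaysAllowGroups("system:masters").
		WithAlwaysAllowPaths("/healthz", "/metrics")
	fmt.Println(authz.AlwaysAllowGroups, authz.AlwaysAllowPaths)
}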
@@ -150,11 +152,11 @@ func (s *DeprecatedInsecureServingOptionsWithLoopback) ApplyTo(insecureServingIn secureLoopbackClientConfig, err := (*insecureServingInfo).NewLoopbackClientConfig() switch { // if we failed and there's no fallback loopback client config, we need to fail - case err != nil && secureLoopbackClientConfig == nil: + case err != nil && *loopbackClientConfig == nil: return err // if we failed, but we already have a fallback loopback client config (usually insecure), allow it - case err != nil && secureLoopbackClientConfig != nil: + case err != nil && *loopbackClientConfig != nil: default: *loopbackClientConfig = secureLoopbackClientConfig diff --git a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go index b6a32f6f161ee..7db2cad8bb5b5 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -59,8 +59,6 @@ type EtcdOptions struct { } var storageTypes = sets.NewString( - storagebackend.StorageTypeUnset, - storagebackend.StorageTypeETCD2, storagebackend.StorageTypeETCD3, ) @@ -87,8 +85,8 @@ func (s *EtcdOptions) Validate() []error { allErrors = append(allErrors, fmt.Errorf("--etcd-servers must be specified")) } - if !storageTypes.Has(s.StorageConfig.Type) { - allErrors = append(allErrors, fmt.Errorf("--storage-backend invalid, must be 'etcd3' or 'etcd2'. If not specified, it will default to 'etcd3'")) + if s.StorageConfig.Type != storagebackend.StorageTypeUnset && !storageTypes.Has(s.StorageConfig.Type) { + allErrors = append(allErrors, fmt.Errorf("--storage-backend invalid, allowed values: %s. If not specified, it will default to 'etcd3'", strings.Join(storageTypes.List(), ", "))) } for _, override := range s.EtcdServersOverrides { @@ -143,10 +141,11 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { "have system defaults set by heuristics, others default to default-watch-cache-size") fs.StringVar(&s.StorageConfig.Type, "storage-backend", s.StorageConfig.Type, - "The storage backend for persistence. Options: 'etcd3' (default), 'etcd2'.") + "The storage backend for persistence. Options: 'etcd3' (default).") - fs.IntVar(&s.StorageConfig.DeserializationCacheSize, "deserialization-cache-size", s.StorageConfig.DeserializationCacheSize, - "Number of deserialized json objects to cache in memory.") + dummyCacheSize := 0 + fs.IntVar(&dummyCacheSize, "deserialization-cache-size", 0, "Number of deserialized json objects to cache in memory.") + fs.MarkDeprecated("deserialization-cache-size", "the deserialization cache was dropped in 1.13 with support for etcd2") fs.StringSliceVar(&s.StorageConfig.ServerList, "etcd-servers", s.StorageConfig.ServerList, "List of etcd servers to connect with (scheme://ip:port), comma separated.") @@ -163,12 +162,12 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.StorageConfig.CAFile, "etcd-cafile", s.StorageConfig.CAFile, "SSL Certificate Authority file used to secure etcd communication.") - fs.BoolVar(&s.StorageConfig.Quorum, "etcd-quorum-read", s.StorageConfig.Quorum, - "If true, enable quorum read. 
It defaults to true and is strongly recommended not setting to false.") - fs.MarkDeprecated("etcd-quorum-read", "This flag is deprecated and the ability to switch off quorum read will be removed in a future release.") - fs.StringVar(&s.EncryptionProviderConfigFilepath, "experimental-encryption-provider-config", s.EncryptionProviderConfigFilepath, "The file containing configuration for encryption providers to be used for storing secrets in etcd") + fs.MarkDeprecated("experimental-encryption-provider-config", "use --encryption-provider-config.") + + fs.StringVar(&s.EncryptionProviderConfigFilepath, "encryption-provider-config", s.EncryptionProviderConfigFilepath, + "The file containing configuration for encryption providers to be used for storing secrets in etcd") fs.DurationVar(&s.StorageConfig.CompactionInterval, "etcd-compaction-interval", s.StorageConfig.CompactionInterval, "The interval of compaction requests. If 0, the compaction request from apiserver is disabled.") diff --git a/vendor/k8s.io/apiserver/pkg/server/options/events.go b/vendor/k8s.io/apiserver/pkg/server/options/events.go new file mode 100644 index 0000000000000..2dfc0111fccef --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/events.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "os" +) + +// ProcessInfo holds the apiserver process information used to send events +type ProcessInfo struct { + // Name of the api process to identify events + Name string + + // Namespace of the api process to send events + Namespace string +} + +// NewProcessInfo returns a new process info with the hostname concatenated to the name given +func NewProcessInfo(name, namespace string) *ProcessInfo { + // try to concat the hostname if available + host, _ := os.Hostname() + if host != "" { + name = fmt.Sprintf("%s-%s", name, host) + } + return &ProcessInfo{ + Name: name, + Namespace: namespace, + } +} + +// validateProcessInfo checks for a complete process info +func validateProcessInfo(p *ProcessInfo) error { + if p == nil { + return fmt.Errorf("ProcessInfo must be set") + } else if p.Name == "" { + return fmt.Errorf("ProcessInfo name must be set") + } else if p.Namespace == "" { + return fmt.Errorf("ProcessInfo namespace must be set") + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go index 5016145bd146c..500d578d6bd35 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -41,9 +41,12 @@ type RecommendedOptions struct { // admission plugin initializers to Admission.ApplyTo. ExtraAdmissionInitializers func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) Admission *AdmissionOptions + // ProcessInfo is used to identify events created by the server. 
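The new events.go above gives library consumers a small identity record used when emitting events for dynamic audit. A usage sketch of the constructor (the name and namespace here are placeholders):

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/server/options"
)

func main() {
	// NewProcessInfo appends the hostname when it can be resolved, so the
	// resulting Name looks like "sample-apiserver-<hostname>".
	info := options.NewProcessInfo("sample-apiserver", "kube-system")
	fmt.Println(info.Name, info.Namespace)
}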
+ ProcessInfo *ProcessInfo + Webhook *WebhookOptions } -func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptions { +func NewRecommendedOptions(prefix string, codec runtime.Codec, processInfo *ProcessInfo) *RecommendedOptions { sso := NewSecureServingOptions() // We are composing recommended options for an aggregated api-server, @@ -62,6 +65,8 @@ func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptio CoreAPI: NewCoreAPIOptions(), ExtraAdmissionInitializers: func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) { return nil, nil }, Admission: NewAdmissionOptions(), + ProcessInfo: processInfo, + Webhook: NewWebhookOptions(), } } @@ -92,7 +97,7 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig, scheme *r if err := o.Authorization.ApplyTo(&config.Config.Authorization); err != nil { return err } - if err := o.Audit.ApplyTo(&config.Config); err != nil { + if err := o.Audit.ApplyTo(&config.Config, config.ClientConfig, config.SharedInformerFactory, o.ProcessInfo, o.Webhook); err != nil { return err } if err := o.Features.ApplyTo(&config.Config); err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go index fccb24e03ad5a..de6b32f455f9a 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -42,7 +42,15 @@ type ServerRunOptions struct { MaxMutatingRequestsInFlight int RequestTimeout time.Duration MinRequestTimeout int - TargetRAMMB int + // We intentionally did not add a flag for this option. Users of the + // apiserver library can wire it to a flag. + JSONPatchMaxCopyBytes int64 + // The limit on the request body size that would be accepted and + // decoded in a write request. 0 means no limit. + // We intentionally did not add a flag for this option. Users of the + // apiserver library can wire it to a flag. 
+ MaxRequestBodyBytes int64 + TargetRAMMB int } func NewServerRunOptions() *ServerRunOptions { @@ -52,6 +60,8 @@ func NewServerRunOptions() *ServerRunOptions { MaxMutatingRequestsInFlight: defaults.MaxMutatingRequestsInFlight, RequestTimeout: defaults.RequestTimeout, MinRequestTimeout: defaults.MinRequestTimeout, + JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes, + MaxRequestBodyBytes: defaults.MaxRequestBodyBytes, } } @@ -63,6 +73,8 @@ func (s *ServerRunOptions) ApplyTo(c *server.Config) error { c.MaxMutatingRequestsInFlight = s.MaxMutatingRequestsInFlight c.RequestTimeout = s.RequestTimeout c.MinRequestTimeout = s.MinRequestTimeout + c.JSONPatchMaxCopyBytes = s.JSONPatchMaxCopyBytes + c.MaxRequestBodyBytes = s.MaxRequestBodyBytes c.PublicAddress = s.AdvertiseAddress return nil @@ -107,10 +119,18 @@ func (s *ServerRunOptions) Validate() []error { errors = append(errors, fmt.Errorf("--min-request-timeout can not be negative value")) } + if s.JSONPatchMaxCopyBytes < 0 { + errors = append(errors, fmt.Errorf("--json-patch-max-copy-bytes can not be negative value")) + } + + if s.MaxRequestBodyBytes < 0 { + errors = append(errors, fmt.Errorf("--max-resource-write-bytes can not be negative value")) + } + return errors } -// AddFlags adds flags for a specific APIServer to the specified FlagSet +// AddUniversalFlags adds flags for a specific APIServer to the specified FlagSet func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { // Note: the weird ""+ in below lines seems to be the only way to get gofmt to // arrange these text blocks sensibly. Grrr. diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving.go b/vendor/k8s.io/apiserver/pkg/server/options/serving.go index 3ecc7e02094c1..939e05741da4f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving.go @@ -24,8 +24,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apiserver/pkg/server" @@ -75,19 +75,25 @@ type CertKey struct { } type GeneratableKeyCert struct { + // CertKey allows setting an explicit cert/key file to use. CertKey CertKey - // CertDirectory is a directory that will contain the certificates. If the cert and key aren't specifically set - // this will be used to derive a match with the "pair-name" + // CertDirectory specifies a directory to write generated certificates to if CertFile/KeyFile aren't explicitly set. + // PairName is used to determine the filenames within CertDirectory. + // If CertDirectory and PairName are not set, an in-memory certificate will be generated. CertDirectory string + // PairName is the name which will be used with CertDirectory to make a cert and key filenames. + // It becomes CertDirectory/PairName.crt and CertDirectory/PairName.key + PairName string + + // GeneratedCert holds an in-memory generated certificate if CertFile/KeyFile aren't explicitly set, and CertDirectory/PairName are not set. + GeneratedCert *tls.Certificate + // FixtureDirectory is a directory that contains test fixture used to avoid regeneration of certs during tests. 
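The comments on the new ServerRunOptions fields above note that JSONPatchMaxCopyBytes and MaxRequestBodyBytes deliberately get no built-in flags. A sketch of how a consumer of the library could wire them up with pflag (the flag names below are invented for illustration, not registered by the vendored package):

package main

import (
	"github.com/spf13/pflag"

	"k8s.io/apiserver/pkg/server/options"
)

func addBodyLimitFlags(fs *pflag.FlagSet, s *options.ServerRunOptions) {
	fs.Int64Var(&s.JSONPatchMaxCopyBytes, "json-patch-max-copy-bytes",
		s.JSONPatchMaxCopyBytes, "Maximum bytes a JSON patch may copy; 0 means no limit.")
	fs.Int64Var(&s.MaxRequestBodyBytes, "max-request-body-bytes",
		s.MaxRequestBodyBytes, "Maximum decoded size of a write request body; 0 means no limit.")
}

func main() {
	s := options.NewServerRunOptions()
	fs := pflag.NewFlagSet("example", pflag.ExitOnError)
	addBodyLimitFlags(fs, s)
	_ = fs.Parse([]string{"--max-request-body-bytes=3145728"})
}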
// The format is: // _-_-.crt // _-_-.key FixtureDirectory string - // PairName is the name which will be used with CertDirectory to make a cert and key names - // It becomes CertDirector/PairName.crt and CertDirector/PairName.key - PairName string } func NewSecureServingOptions() *SecureServingOptions { @@ -121,6 +127,10 @@ func (s *SecureServingOptions) Validate() []error { errors = append(errors, fmt.Errorf("--secure-port %v must be between 0 and 65535, inclusive. 0 for turning off secure port", s.BindPort)) } + if (len(s.ServerCert.CertKey.CertFile) != 0 || len(s.ServerCert.CertKey.KeyFile) != 0) && s.ServerCert.GeneratedCert != nil { + errors = append(errors, fmt.Errorf("cert/key file and in-memory certificate cannot both be set")) + } + return errors } @@ -219,6 +229,8 @@ func (s *SecureServingOptions) ApplyTo(config **server.SecureServingInfo) error return fmt.Errorf("unable to load server certificate: %v", err) } c.Cert = &tlsCert + } else if s.ServerCert.GeneratedCert != nil { + c.Cert = s.ServerCert.GeneratedCert } if len(s.CipherSuites) != 0 { @@ -264,13 +276,20 @@ func (s *SecureServingOptions) MaybeDefaultWithSelfSignedCerts(publicAddress str return nil } - keyCert.CertFile = path.Join(s.ServerCert.CertDirectory, s.ServerCert.PairName+".crt") - keyCert.KeyFile = path.Join(s.ServerCert.CertDirectory, s.ServerCert.PairName+".key") - - canReadCertAndKey, err := certutil.CanReadCertAndKey(keyCert.CertFile, keyCert.KeyFile) - if err != nil { - return err + canReadCertAndKey := false + if len(s.ServerCert.CertDirectory) > 0 { + if len(s.ServerCert.PairName) == 0 { + return fmt.Errorf("PairName is required if CertDirectory is set") + } + keyCert.CertFile = path.Join(s.ServerCert.CertDirectory, s.ServerCert.PairName+".crt") + keyCert.KeyFile = path.Join(s.ServerCert.CertDirectory, s.ServerCert.PairName+".key") + if canRead, err := certutil.CanReadCertAndKey(keyCert.CertFile, keyCert.KeyFile); err != nil { + return err + } else { + canReadCertAndKey = canRead + } } + if !canReadCertAndKey { // add either the bind address or localhost to the valid alternates bindIP := s.BindAddress.String() @@ -282,15 +301,21 @@ func (s *SecureServingOptions) MaybeDefaultWithSelfSignedCerts(publicAddress str if cert, key, err := certutil.GenerateSelfSignedCertKeyWithFixtures(publicAddress, alternateIPs, alternateDNS, s.ServerCert.FixtureDirectory); err != nil { return fmt.Errorf("unable to generate self signed cert: %v", err) - } else { + } else if len(keyCert.CertFile) > 0 && len(keyCert.KeyFile) > 0 { if err := certutil.WriteCert(keyCert.CertFile, cert); err != nil { return err } - if err := certutil.WriteKey(keyCert.KeyFile, key); err != nil { return err } - glog.Infof("Generated self-signed cert (%s, %s)", keyCert.CertFile, keyCert.KeyFile) + klog.Infof("Generated self-signed cert (%s, %s)", keyCert.CertFile, keyCert.KeyFile) + } else { + tlsCert, err := tls.X509KeyPair(cert, key) + if err != nil { + return fmt.Errorf("unable to generate self signed cert: %v", err) + } + s.ServerCert.GeneratedCert = &tlsCert + klog.Infof("Generated self-signed cert in-memory") } } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go index ac9635d2dd320..7f19206425e71 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go @@ -63,11 +63,11 @@ func (s *SecureServingOptionsWithLoopback) ApplyTo(secureServingInfo **server.Se 
secureLoopbackClientConfig, err := (*secureServingInfo).NewLoopbackClientConfig(uuid.NewRandom().String(), certPem) switch { // if we failed and there's no fallback loopback client config, we need to fail - case err != nil && secureLoopbackClientConfig == nil: + case err != nil && *loopbackClientConfig == nil: return err // if we failed, but we already have a fallback loopback client config (usually insecure), allow it - case err != nil && secureLoopbackClientConfig != nil: + case err != nil && *loopbackClientConfig != nil: default: *loopbackClientConfig = secureLoopbackClientConfig diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction_expansion.go b/vendor/k8s.io/apiserver/pkg/server/options/webhook.go similarity index 51% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction_expansion.go rename to vendor/k8s.io/apiserver/pkg/server/options/webhook.go index 8e2030101bf8e..bd3ec124d6d08 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction_expansion.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/webhook.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,25 +14,21 @@ See the License for the specific language governing permissions and limitations under the License. */ -package internalversion +package options import ( - policy "k8s.io/kubernetes/pkg/apis/policy" + utilwebhook "k8s.io/apiserver/pkg/util/webhook" ) -// The EvictionExpansion interface allows manually adding extra methods to the ScaleInterface. -type EvictionExpansion interface { - Evict(eviction *policy.Eviction) error +// WebhookOptions holds the outgoing webhook options +type WebhookOptions struct { + ServiceResolver utilwebhook.ServiceResolver + AuthInfoResolverWrapper utilwebhook.AuthenticationInfoResolverWrapper } -func (c *evictions) Evict(eviction *policy.Eviction) error { - return c.client.Post(). - AbsPath("/api/v1"). - Namespace(eviction.Namespace). - Resource("pods"). - Name(eviction.Name). - SubResource("eviction"). - Body(eviction). - Do(). 
- Error() +// NewWebhookOptions returns the default options for outgoing webhooks +func NewWebhookOptions() *WebhookOptions { + return &WebhookOptions{ + ServiceResolver: utilwebhook.NewDefaultServiceResolver(), + } } diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/routes/BUILD.bazel index a22f2a15e593c..3722d6caea0f3 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/server/routes/BUILD.bazel @@ -20,7 +20,6 @@ go_library( "//vendor/github.com/elazarl/go-bindata-assetfs:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -30,6 +29,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/server/mux:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/routes/data/swagger:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/metrics:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/handler:go_default_library", ], diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/flags.go b/vendor/k8s.io/apiserver/pkg/server/routes/flags.go index d40f11499b3fe..a03b80d3ce79f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/flags.go +++ b/vendor/k8s.io/apiserver/pkg/server/routes/flags.go @@ -24,7 +24,7 @@ import ( "path" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiserver/pkg/server/mux" ) @@ -57,7 +57,7 @@ func (f DebugFlags) Index(w http.ResponseWriter, r *http.Request) { lock.RLock() defer lock.RUnlock() if err := indexTmpl.Execute(w, registeredFlags); err != nil { - glog.Error(err) + klog.Error(err) } } diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go index 06c723d37531e..934bbf84a0429 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go +++ b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -18,7 +18,7 @@ package routes import ( restful "github.com/emicklei/go-restful" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiserver/pkg/server/mux" "k8s.io/kube-openapi/pkg/common" @@ -37,10 +37,10 @@ func (oa OpenAPI) Install(c *restful.Container, mux *mux.PathRecorderMux) { // are tracked at: https://docs.google.com/document/d/19lEqE9lc4yHJ3WJAJxS_G7TcORIJXGHyq3wpwcH28nU. 
_, err := handler.BuildAndRegisterOpenAPIService("/swagger.json", c.RegisteredWebServices(), oa.Config, mux) if err != nil { - glog.Fatalf("Failed to register open api spec for root: %v", err) + klog.Fatalf("Failed to register open api spec for root: %v", err) } _, err = handler.BuildAndRegisterOpenAPIVersionedService("/openapi/v2", c.RegisteredWebServices(), oa.Config, mux) if err != nil { - glog.Fatalf("Failed to register versioned open api spec for root: %v", err) + klog.Fatalf("Failed to register versioned open api spec for root: %v", err) } } diff --git a/vendor/k8s.io/apiserver/pkg/server/secure_serving.go b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go index 67a45d5779f29..08006c965502b 100644 --- a/vendor/k8s.io/apiserver/pkg/server/secure_serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go @@ -26,8 +26,8 @@ import ( "strings" "time" - "github.com/golang/glog" "golang.org/x/net/http2" + "k8s.io/klog" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/validation" @@ -113,7 +113,7 @@ func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Dur return fmt.Errorf("error configuring http2: %v", err) } - glog.Infof("Serving securely on %s", secureServer.Addr) + klog.Infof("Serving securely on %s", secureServer.Addr) return RunServer(secureServer, s.Listener, shutdownTimeout, stopCh) } @@ -153,7 +153,7 @@ func RunServer( msg := fmt.Sprintf("Stopped listening on %s", ln.Addr().String()) select { case <-stopCh: - glog.Info(msg) + klog.Info(msg) default: panic(fmt.Sprintf("%s due to error: %v", msg, err)) } diff --git a/vendor/k8s.io/apiserver/pkg/server/signal.go b/vendor/k8s.io/apiserver/pkg/server/signal.go index 1cd8cefaa2576..6f0cff4baed8d 100644 --- a/vendor/k8s.io/apiserver/pkg/server/signal.go +++ b/vendor/k8s.io/apiserver/pkg/server/signal.go @@ -26,7 +26,7 @@ var onlyOneSignalHandler = make(chan struct{}) // SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned // which is closed on one of these signals. If a second signal is caught, the program // is terminated with exit code 1. 
-func SetupSignalHandler() (stopCh <-chan struct{}) { +func SetupSignalHandler() <-chan struct{} { close(onlyOneSignalHandler) // panics when called twice stop := make(chan struct{}) diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/storage/BUILD.bazel index badbb928709db..3a4afbf0119e1 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/server/storage/BUILD.bazel @@ -13,7 +13,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/server/storage", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", @@ -22,5 +21,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go index bbdc4b9a059ff..e2f91bf13d299 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_codec.go @@ -24,8 +24,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" "k8s.io/apiserver/pkg/storage/storagebackend" - - "github.com/golang/glog" ) // StorageCodecConfig are the arguments passed to newStorageCodecFn @@ -48,11 +46,6 @@ func NewStorageCodec(opts StorageCodecConfig) (runtime.Codec, error) { return nil, fmt.Errorf("%q is not a valid mime-type", opts.StorageMediaType) } - if opts.Config.Type == storagebackend.StorageTypeETCD2 && mediaType != "application/json" { - glog.Warningf(`storage type %q does not support media type %q, using "application/json"`, storagebackend.StorageTypeETCD2, mediaType) - mediaType = "application/json" - } - serializer, ok := runtime.SerializerInfoForMediaType(opts.StorageSerializer.SupportedMediaTypes(), mediaType) if !ok { return nil, fmt.Errorf("unable to find serializer for %q", mediaType) @@ -60,11 +53,6 @@ func NewStorageCodec(opts StorageCodecConfig) (runtime.Codec, error) { s := serializer.Serializer - // make sure the selected encoder supports string data - if !serializer.EncodesAsText && opts.Config.Type == storagebackend.StorageTypeETCD2 { - return nil, fmt.Errorf("storage type %q does not support binary media type %q", storagebackend.StorageTypeETCD2, mediaType) - } - // Give callers the opportunity to wrap encoders and decoders. For decoders, each returned decoder will // be passed to the recognizer so that multiple decoders are available. 
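The signal.go hunk above only changes SetupSignalHandler's declared return from a named result to a plain <-chan struct{}; callers are unaffected. A typical usage sketch (the surrounding server loop is omitted):

package main

import (
	"fmt"

	genericapiserver "k8s.io/apiserver/pkg/server"
)

func main() {
	// The first SIGTERM/SIGINT closes the channel; a second one exits the process.
	stopCh := genericapiserver.SetupSignalHandler()
	fmt.Println("serving until a termination signal arrives")
	<-stopCh
	fmt.Println("shutting down")
}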
var encoder runtime.Encoder = s @@ -97,7 +85,7 @@ func NewStorageCodec(opts StorageCodecConfig) (runtime.Codec, error) { ) decoder := opts.StorageSerializer.DecoderToVersion( recognizer.NewDecoder(decoders...), - runtime.NewMultiGroupVersioner( + runtime.NewCoercingMultiGroupVersioner( opts.MemoryVersion, schema.GroupKind{Group: opts.MemoryVersion.Group}, schema.GroupKind{Group: opts.StorageVersion.Group}, diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go index 50c068254631f..a87ce4a5efb1f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -22,7 +22,7 @@ import ( "io/ioutil" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -282,7 +282,7 @@ func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (* if err != nil { return nil, err } - glog.V(3).Infof("storing %v in %v, reading as %v from %#v", groupResource, codecConfig.StorageVersion, codecConfig.MemoryVersion, codecConfig.Config) + klog.V(3).Infof("storing %v in %v, reading as %v from %#v", groupResource, codecConfig.StorageVersion, codecConfig.MemoryVersion, codecConfig.Config) return &storageConfig, nil } @@ -302,14 +302,14 @@ func (s *DefaultStorageFactory) Backends() []Backend { if len(s.StorageConfig.CertFile) > 0 && len(s.StorageConfig.KeyFile) > 0 { cert, err := tls.LoadX509KeyPair(s.StorageConfig.CertFile, s.StorageConfig.KeyFile) if err != nil { - glog.Errorf("failed to load key pair while getting backends: %s", err) + klog.Errorf("failed to load key pair while getting backends: %s", err) } else { tlsConfig.Certificates = []tls.Certificate{cert} } } if len(s.StorageConfig.CAFile) > 0 { if caCert, err := ioutil.ReadFile(s.StorageConfig.CAFile); err != nil { - glog.Errorf("failed to read ca file while getting backends: %s", err) + klog.Errorf("failed to read ca file while getting backends: %s", err) } else { caPool := x509.NewCertPool() caPool.AppendCertsFromPEM(caCert) diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/cacher/BUILD.bazel index bb5560f0d394b..6fa5656dbe831 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/BUILD.bazel @@ -12,7 +12,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/storage/cacher", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -29,5 +28,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index f73634cd9513b..fb5aa229d9a12 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ 
-122,7 +122,7 @@ func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool func (i *indexedWatchers) terminateAll(objectType reflect.Type) { if len(i.allWatchers) > 0 || len(i.valueWatchers) > 0 { - glog.Warningf("Terminating all watchers from cacher %v", objectType) + klog.Warningf("Terminating all watchers from cacher %v", objectType) } i.allWatchers.terminateAll() for index, watchers := range i.valueWatchers { @@ -269,7 +269,7 @@ func (c *Cacher) startCaching(stopChannel <-chan struct{}) { // Also note that startCaching is called in a loop, so there's no need // to have another loop here. if err := c.reflector.ListAndWatch(stopChannel); err != nil { - glog.Errorf("unexpected ListAndWatch error: %v", err) + klog.Errorf("unexpected ListAndWatch error: %v", err) } } @@ -335,6 +335,13 @@ func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, chanSize = 1000 } + // With some events already sent, update resourceVersion so that + // events that were buffered and not yet processed won't be delivered + // to this watcher second time causing going back in time. + if len(initEvents) > 0 { + watchRV = initEvents[len(initEvents)-1].ResourceVersion + } + c.Lock() defer c.Unlock() forget := forgetWatcher(c, c.watcherIdx, triggerValue, triggerSupported) @@ -404,10 +411,15 @@ func (c *Cacher) Get(ctx context.Context, key string, resourceVersion string, ob // GetToList implements storage.Interface. func (c *Cacher) GetToList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error { pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) - if resourceVersion == "" || (pagingEnabled && (len(pred.Continue) > 0 || pred.Limit > 0)) { + hasContinuation := pagingEnabled && len(pred.Continue) > 0 + hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0" + if resourceVersion == "" || hasContinuation || hasLimit { // If resourceVersion is not specified, serve it from underlying - // storage (for backward compatibility). If a continuation or limit is + // storage (for backward compatibility). If a continuation is // requested, serve it from the underlying storage as well. + // Limits are only sent to storage when resourceVersion is non-zero + // since the watch cache isn't able to perform continuations, and + // limits are ignored when resource version is zero return c.storage.GetToList(ctx, key, resourceVersion, pred, listObj) } @@ -547,7 +559,7 @@ func (c *Cacher) GuaranteedUpdate( // Ignore the suggestion and try to pass down the current version of the object // read from cache. if elem, exists, err := c.watchCache.GetByKey(key); err != nil { - glog.Errorf("GetByKey returned error: %v", err) + klog.Errorf("GetByKey returned error: %v", err) } else if exists { currObj := elem.(*storeElement).Object.DeepCopyObject() return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj) @@ -590,7 +602,7 @@ func (c *Cacher) triggerValues(event *watchCacheEvent) ([]string, bool) { func (c *Cacher) processEvent(event *watchCacheEvent) { if curLen := int64(len(c.incoming)); c.incomingHWM.Update(curLen) { // Monitor if this gets backed up, and how much. 
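The cacher.go hunk above changes when GetToList bypasses the watch cache: requests with a continuation token, or with a limit and a non-zero resourceVersion, now go to the underlying storage, while resourceVersion "0" lists keep being served from the cache. A hedged, self-contained restatement of just that boolean decision (function and parameter names are illustrative):

```go
package main

import "fmt"

// bypassWatchCache mirrors the decision added to (*Cacher).GetToList above.
func bypassWatchCache(resourceVersion, continueToken string, limit int64, pagingEnabled bool) bool {
	hasContinuation := pagingEnabled && len(continueToken) > 0
	// Limits only bypass the cache for a non-zero resourceVersion: the watch
	// cache cannot serve continuations, and limits are ignored when RV is "0".
	hasLimit := pagingEnabled && limit > 0 && resourceVersion != "0"
	return resourceVersion == "" || hasContinuation || hasLimit
}

func main() {
	fmt.Println(bypassWatchCache("", "", 0, true))      // true: no RV, read from storage
	fmt.Println(bypassWatchCache("0", "", 500, true))   // false: RV=0 list served from cache
	fmt.Println(bypassWatchCache("123", "", 500, true)) // true: limited list goes to storage
}
```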
- glog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen) + klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen) } c.incoming <- *event } @@ -679,7 +691,7 @@ func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported b // false is currently passed only if we are forcing watcher to close due // to its unresponsiveness and blocking other watchers. // TODO: Get this information in cleaner way. - glog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String()) + klog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String()) } // It's possible that the watcher is already not in the structure (e.g. in case of // simultaneous Stop() and terminateAllWatchers(), but it doesn't break anything. @@ -942,7 +954,7 @@ func (c *cacheWatcher) process(initEvents []*watchCacheEvent, resourceVersion ui if len(initEvents) > 0 { objType = reflect.TypeOf(initEvents[0].Object).String() } - glog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime) + klog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime) } defer close(c.result) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD.bazel index 97d3a81665ccb..aeefb88fcfe9d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD.bazel @@ -5,26 +5,14 @@ go_library( srcs = [ "api_object_versioner.go", "doc.go", - "etcd_helper.go", - "etcd_watcher.go", ], importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/storage/etcd", importpath = "k8s.io/apiserver/pkg/storage/etcd", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/coreos/etcd/client:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd/metrics:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd/util:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go b/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go deleted file mode 100644 index c6bdd9c92cdbc..0000000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go +++ /dev/null @@ -1,637 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package etcd - -import ( - "context" - "errors" - "fmt" - "path" - "reflect" - "time" - - etcd "github.com/coreos/etcd/client" - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - utilcache "k8s.io/apimachinery/pkg/util/cache" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/apiserver/pkg/storage" - "k8s.io/apiserver/pkg/storage/etcd/metrics" - etcdutil "k8s.io/apiserver/pkg/storage/etcd/util" - utiltrace "k8s.io/apiserver/pkg/util/trace" -) - -// ValueTransformer allows a string value to be transformed before being read from or written to the underlying store. The methods -// must be able to undo the transformation caused by the other. -type ValueTransformer interface { - // TransformStringFromStorage may transform the provided string from its underlying storage representation or return an error. - // Stale is true if the object on disk is stale and a write to etcd should be issued, even if the contents of the object - // have not changed. - TransformStringFromStorage(string) (value string, stale bool, err error) - // TransformStringToStorage may transform the provided string into the appropriate form in storage or return an error. - TransformStringToStorage(string) (value string, err error) -} - -type identityTransformer struct{} - -func (identityTransformer) TransformStringFromStorage(s string) (string, bool, error) { - return s, false, nil -} -func (identityTransformer) TransformStringToStorage(s string) (string, error) { return s, nil } - -// IdentityTransformer performs no transformation on the provided values. -var IdentityTransformer ValueTransformer = identityTransformer{} - -// Creates a new storage interface from the client -// TODO: deprecate in favor of storage.Config abstraction over time -func NewEtcdStorage(client etcd.Client, codec runtime.Codec, prefix string, quorum bool, cacheSize int, transformer ValueTransformer) storage.Interface { - return &etcdHelper{ - etcdMembersAPI: etcd.NewMembersAPI(client), - etcdKeysAPI: etcd.NewKeysAPI(client), - codec: codec, - versioner: APIObjectVersioner{}, - transformer: transformer, - pathPrefix: path.Join("/", prefix), - quorum: quorum, - cache: utilcache.NewCache(cacheSize), - } -} - -// etcdHelper is the reference implementation of storage.Interface. -type etcdHelper struct { - etcdMembersAPI etcd.MembersAPI - etcdKeysAPI etcd.KeysAPI - codec runtime.Codec - transformer ValueTransformer - // Note that versioner is required for etcdHelper to work correctly. - // The public constructors (NewStorage & NewEtcdStorage) are setting it - // correctly, so be careful when manipulating with it manually. - // optional, has to be set to perform any atomic operations - versioner storage.Versioner - // prefix for all etcd keys - pathPrefix string - // if true, perform quorum read - quorum bool - - // We cache objects stored in etcd. For keys we use Node.ModifiedIndex which is equivalent - // to resourceVersion. - // This depends on etcd's indexes being globally unique across all objects/types. This will - // have to revisited if we decide to do things like multiple etcd clusters, or etcd will - // support multi-object transaction that will result in many objects with the same index. - // Number of entries stored in the cache is controlled by maxEtcdCacheEntries constant. 
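The deleted etcd2 helper above defines ValueTransformer as a pair of methods that must invert each other before values reach the store. A hedged toy implementation of that contract (base64 is purely illustrative and is not how Kubernetes handles encryption at rest):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

type base64Transformer struct{}

func (base64Transformer) TransformStringToStorage(s string) (string, error) {
	return base64.StdEncoding.EncodeToString([]byte(s)), nil
}

func (base64Transformer) TransformStringFromStorage(s string) (string, bool, error) {
	b, err := base64.StdEncoding.DecodeString(s)
	// stale=false: nothing about the on-disk form requires a rewrite.
	return string(b), false, err
}

func main() {
	var t base64Transformer
	stored, _ := t.TransformStringToStorage(`{"kind":"Pod"}`)
	plain, stale, _ := t.TransformStringFromStorage(stored)
	fmt.Println(stored, plain, stale)
}
```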
- // TODO: Measure how much this cache helps after the conversion code is optimized. - cache utilcache.Cache -} - -// Implements storage.Interface. -func (h *etcdHelper) Versioner() storage.Versioner { - return h.versioner -} - -// Implements storage.Interface. -func (h *etcdHelper) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { - trace := utiltrace.New("etcdHelper::Create " + getTypeName(obj)) - defer trace.LogIfLong(250 * time.Millisecond) - if ctx == nil { - glog.Errorf("Context is nil") - } - key = path.Join(h.pathPrefix, key) - data, err := runtime.Encode(h.codec, obj) - trace.Step("Object encoded") - if err != nil { - return err - } - if version, err := h.versioner.ObjectResourceVersion(obj); err == nil && version != 0 { - return errors.New("resourceVersion may not be set on objects to be created") - } - if err := h.versioner.PrepareObjectForStorage(obj); err != nil { - return fmt.Errorf("PrepareObjectForStorage returned an error: %v", err) - } - trace.Step("Version checked") - - startTime := time.Now() - opts := etcd.SetOptions{ - TTL: time.Duration(ttl) * time.Second, - PrevExist: etcd.PrevNoExist, - } - - newBody, err := h.transformer.TransformStringToStorage(string(data)) - if err != nil { - return storage.NewInternalError(err.Error()) - } - - response, err := h.etcdKeysAPI.Set(ctx, key, newBody, &opts) - trace.Step("Object created") - metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime) - if err != nil { - return toStorageErr(err, key, 0) - } - if out != nil { - if _, err := conversion.EnforcePtr(out); err != nil { - panic("unable to convert output object to pointer") - } - _, _, _, err = h.extractObj(response, err, out, false, false) - } - return err -} - -// Implements storage.Interface. -func (h *etcdHelper) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions) error { - if ctx == nil { - glog.Errorf("Context is nil") - } - key = path.Join(h.pathPrefix, key) - v, err := conversion.EnforcePtr(out) - if err != nil { - panic("unable to convert output object to pointer") - } - - if preconditions == nil { - startTime := time.Now() - response, err := h.etcdKeysAPI.Delete(ctx, key, nil) - metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime) - if !etcdutil.IsEtcdNotFound(err) { - // if the object that existed prior to the delete is returned by etcd, update the out object. - if err != nil || response.PrevNode != nil { - _, _, _, err = h.extractObj(response, err, out, false, true) - } - } - return toStorageErr(err, key, 0) - } - - // Check the preconditions match. - obj := reflect.New(v.Type()).Interface().(runtime.Object) - for { - _, node, res, _, err := h.bodyAndExtractObj(ctx, key, obj, false) - if err != nil { - return toStorageErr(err, key, 0) - } - if err := preconditions.Check(key, obj); err != nil { - return toStorageErr(err, key, 0) - } - index := uint64(0) - if node != nil { - index = node.ModifiedIndex - } else if res != nil { - index = res.Index - } - opt := etcd.DeleteOptions{PrevIndex: index} - startTime := time.Now() - response, err := h.etcdKeysAPI.Delete(ctx, key, &opt) - metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime) - if !etcdutil.IsEtcdTestFailed(err) { - if !etcdutil.IsEtcdNotFound(err) { - // if the object that existed prior to the delete is returned by etcd, update the out object. 
- if err != nil || response.PrevNode != nil { - _, _, _, err = h.extractObj(response, err, out, false, true) - } - } - return toStorageErr(err, key, 0) - } - - glog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key) - } -} - -// Implements storage.Interface. -func (h *etcdHelper) Watch(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) { - if ctx == nil { - glog.Errorf("Context is nil") - } - watchRV, err := h.versioner.ParseResourceVersion(resourceVersion) - if err != nil { - return nil, err - } - key = path.Join(h.pathPrefix, key) - w := newEtcdWatcher(false, h.quorum, nil, pred, h.codec, h.versioner, nil, h.transformer, h) - go w.etcdWatch(ctx, h.etcdKeysAPI, key, watchRV) - return w, nil -} - -// Implements storage.Interface. -func (h *etcdHelper) WatchList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) { - if ctx == nil { - glog.Errorf("Context is nil") - } - watchRV, err := h.versioner.ParseResourceVersion(resourceVersion) - if err != nil { - return nil, err - } - key = path.Join(h.pathPrefix, key) - w := newEtcdWatcher(true, h.quorum, exceptKey(key), pred, h.codec, h.versioner, nil, h.transformer, h) - go w.etcdWatch(ctx, h.etcdKeysAPI, key, watchRV) - return w, nil -} - -// Implements storage.Interface. -func (h *etcdHelper) Get(ctx context.Context, key string, resourceVersion string, objPtr runtime.Object, ignoreNotFound bool) error { - if ctx == nil { - glog.Errorf("Context is nil") - } - key = path.Join(h.pathPrefix, key) - _, _, _, _, err := h.bodyAndExtractObj(ctx, key, objPtr, ignoreNotFound) - return err -} - -// bodyAndExtractObj performs the normal Get path to etcd, returning the parsed node and response for additional information -// about the response, like the current etcd index and the ttl. 
-func (h *etcdHelper) bodyAndExtractObj(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) (body string, node *etcd.Node, res *etcd.Response, stale bool, err error) { - if ctx == nil { - glog.Errorf("Context is nil") - } - startTime := time.Now() - - opts := &etcd.GetOptions{ - Quorum: h.quorum, - } - - response, err := h.etcdKeysAPI.Get(ctx, key, opts) - metrics.RecordEtcdRequestLatency("get", getTypeName(objPtr), startTime) - if err != nil && !etcdutil.IsEtcdNotFound(err) { - return "", nil, nil, false, toStorageErr(err, key, 0) - } - body, node, stale, err = h.extractObj(response, err, objPtr, ignoreNotFound, false) - return body, node, response, stale, toStorageErr(err, key, 0) -} - -func (h *etcdHelper) extractObj(response *etcd.Response, inErr error, objPtr runtime.Object, ignoreNotFound, prevNode bool) (body string, node *etcd.Node, stale bool, err error) { - if response != nil { - if prevNode { - node = response.PrevNode - } else { - node = response.Node - } - } - if inErr != nil || node == nil || len(node.Value) == 0 { - if ignoreNotFound { - v, err := conversion.EnforcePtr(objPtr) - if err != nil { - return "", nil, false, err - } - v.Set(reflect.Zero(v.Type())) - return "", nil, false, nil - } else if inErr != nil { - return "", nil, false, inErr - } - return "", nil, false, fmt.Errorf("unable to locate a value on the response: %#v", response) - } - - body, stale, err = h.transformer.TransformStringFromStorage(node.Value) - if err != nil { - return body, nil, stale, storage.NewInternalError(err.Error()) - } - out, gvk, err := h.codec.Decode([]byte(body), nil, objPtr) - if err != nil { - return body, nil, stale, err - } - if out != objPtr { - return body, nil, stale, fmt.Errorf("unable to decode object %s into %v", gvk.String(), reflect.TypeOf(objPtr)) - } - // being unable to set the version does not prevent the object from being extracted - _ = h.versioner.UpdateObject(objPtr, node.ModifiedIndex) - return body, node, stale, err -} - -// Implements storage.Interface. 
-func (h *etcdHelper) GetToList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error { - if ctx == nil { - glog.Errorf("Context is nil") - } - trace := utiltrace.New("GetToList " + getTypeName(listObj)) - listPtr, err := meta.GetItemsPtr(listObj) - if err != nil { - return err - } - key = path.Join(h.pathPrefix, key) - startTime := time.Now() - trace.Step("About to read etcd node") - - opts := &etcd.GetOptions{ - Quorum: h.quorum, - } - response, err := h.etcdKeysAPI.Get(ctx, key, opts) - trace.Step("Etcd node read") - metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime) - if err != nil { - if etcdutil.IsEtcdNotFound(err) { - if etcdErr, ok := err.(etcd.Error); ok { - return h.versioner.UpdateList(listObj, etcdErr.Index, "") - } - return fmt.Errorf("unexpected error from storage: %#v", err) - } - return toStorageErr(err, key, 0) - } - - nodes := make([]*etcd.Node, 0) - nodes = append(nodes, response.Node) - - if err := h.decodeNodeList(nodes, pred, listPtr); err != nil { - return err - } - trace.Step("Object decoded") - if err := h.versioner.UpdateList(listObj, response.Index, ""); err != nil { - return err - } - return nil -} - -// decodeNodeList walks the tree of each node in the list and decodes into the specified object -func (h *etcdHelper) decodeNodeList(nodes []*etcd.Node, pred storage.SelectionPredicate, slicePtr interface{}) error { - trace := utiltrace.New("decodeNodeList " + getTypeName(slicePtr)) - defer trace.LogIfLong(400 * time.Millisecond) - v, err := conversion.EnforcePtr(slicePtr) - if err != nil || v.Kind() != reflect.Slice { - // This should not happen at runtime. - panic("need ptr to slice") - } - for _, node := range nodes { - if node.Dir { - // IMPORTANT: do not log each key as a discrete step in the trace log - // as it produces an immense amount of log spam when there is a large - // amount of content in the list. - if err := h.decodeNodeList(node.Nodes, pred, slicePtr); err != nil { - return err - } - continue - } - if obj, found := h.getFromCache(node.ModifiedIndex, pred); found { - // obj != nil iff it matches the pred function. - if obj != nil { - v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) - } - } else { - body, _, err := h.transformer.TransformStringFromStorage(node.Value) - if err != nil { - // omit items from lists and watches that cannot be transformed, but log the error - utilruntime.HandleError(fmt.Errorf("unable to transform key %q: %v", node.Key, err)) - continue - } - - obj, _, err := h.codec.Decode([]byte(body), nil, reflect.New(v.Type().Elem()).Interface().(runtime.Object)) - if err != nil { - return err - } - // being unable to set the version does not prevent the object from being extracted - _ = h.versioner.UpdateObject(obj, node.ModifiedIndex) - if matched, err := pred.Matches(obj); err == nil && matched { - v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) - } - if node.ModifiedIndex != 0 { - h.addToCache(node.ModifiedIndex, obj) - } - } - } - trace.Step(fmt.Sprintf("Decoded %v nodes", len(nodes))) - return nil -} - -// Implements storage.Interface. 
-func (h *etcdHelper) List(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error { - if ctx == nil { - glog.Errorf("Context is nil") - } - trace := utiltrace.New("List " + getTypeName(listObj)) - defer trace.LogIfLong(400 * time.Millisecond) - listPtr, err := meta.GetItemsPtr(listObj) - if err != nil { - return err - } - key = path.Join(h.pathPrefix, key) - startTime := time.Now() - trace.Step("About to list etcd node") - nodes, index, err := h.listEtcdNode(ctx, key) - trace.Step("Etcd node listed") - metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime) - if err != nil { - return err - } - if err := h.decodeNodeList(nodes, pred, listPtr); err != nil { - return err - } - trace.Step("Node list decoded") - if err := h.versioner.UpdateList(listObj, index, ""); err != nil { - return err - } - return nil -} - -func (h *etcdHelper) listEtcdNode(ctx context.Context, key string) ([]*etcd.Node, uint64, error) { - if ctx == nil { - glog.Errorf("Context is nil") - } - opts := etcd.GetOptions{ - Recursive: true, - Sort: true, - Quorum: h.quorum, - } - result, err := h.etcdKeysAPI.Get(ctx, key, &opts) - if err != nil { - var index uint64 - if etcdError, ok := err.(etcd.Error); ok { - index = etcdError.Index - } - nodes := make([]*etcd.Node, 0) - if etcdutil.IsEtcdNotFound(err) { - return nodes, index, nil - } else { - return nodes, index, toStorageErr(err, key, 0) - } - } - return result.Node.Nodes, result.Index, nil -} - -// Implements storage.Interface. -func (h *etcdHelper) GuaranteedUpdate( - ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, - preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ ...runtime.Object) error { - // Ignore the suggestion about current object. - if ctx == nil { - glog.Errorf("Context is nil") - } - v, err := conversion.EnforcePtr(ptrToType) - if err != nil { - // Panic is appropriate, because this is a programming error. - panic("need ptr to type") - } - key = path.Join(h.pathPrefix, key) - for { - obj := reflect.New(v.Type()).Interface().(runtime.Object) - origBody, node, res, stale, err := h.bodyAndExtractObj(ctx, key, obj, ignoreNotFound) - if err != nil { - return toStorageErr(err, key, 0) - } - if err := preconditions.Check(key, obj); err != nil { - return toStorageErr(err, key, 0) - } - meta := storage.ResponseMeta{} - if node != nil { - meta.TTL = node.TTL - meta.ResourceVersion = node.ModifiedIndex - } - // Get the object to be written by calling tryUpdate. - ret, newTTL, err := tryUpdate(obj, meta) - if err != nil { - return toStorageErr(err, key, 0) - } - - index := uint64(0) - ttl := uint64(0) - if node != nil { - index = node.ModifiedIndex - if node.TTL != 0 { - ttl = uint64(node.TTL) - } - if node.Expiration != nil && ttl == 0 { - ttl = 1 - } - } else if res != nil { - index = res.Index - } - - if newTTL != nil { - if ttl != 0 && *newTTL == 0 { - // TODO: remove this after we have verified this is no longer an issue - glog.V(4).Infof("GuaranteedUpdate is clearing TTL for %q, may not be intentional", key) - } - ttl = *newTTL - } - - // Since update object may have a resourceVersion set, we need to clear it here. 
- if err := h.versioner.PrepareObjectForStorage(ret); err != nil { - return errors.New("resourceVersion cannot be set on objects store in etcd") - } - - newBodyData, err := runtime.Encode(h.codec, ret) - if err != nil { - return err - } - newBody := string(newBodyData) - data, err := h.transformer.TransformStringToStorage(newBody) - if err != nil { - return storage.NewInternalError(err.Error()) - } - - // First time this key has been used, try creating new value. - if index == 0 { - startTime := time.Now() - opts := etcd.SetOptions{ - TTL: time.Duration(ttl) * time.Second, - PrevExist: etcd.PrevNoExist, - } - response, err := h.etcdKeysAPI.Set(ctx, key, data, &opts) - metrics.RecordEtcdRequestLatency("create", getTypeName(ptrToType), startTime) - if etcdutil.IsEtcdNodeExist(err) { - continue - } - _, _, _, err = h.extractObj(response, err, ptrToType, false, false) - return toStorageErr(err, key, 0) - } - - // If we don't send an update, we simply return the currently existing - // version of the object. However, the value transformer may indicate that - // the on disk representation has changed and that we must commit an update. - if newBody == origBody && !stale { - _, _, _, err := h.extractObj(res, nil, ptrToType, ignoreNotFound, false) - return err - } - - startTime := time.Now() - // Swap origBody with data, if origBody is the latest etcd data. - opts := etcd.SetOptions{ - PrevIndex: index, - TTL: time.Duration(ttl) * time.Second, - } - response, err := h.etcdKeysAPI.Set(ctx, key, data, &opts) - metrics.RecordEtcdRequestLatency("compareAndSwap", getTypeName(ptrToType), startTime) - if etcdutil.IsEtcdTestFailed(err) { - // Try again. - continue - } - _, _, _, err = h.extractObj(response, err, ptrToType, false, false) - return toStorageErr(err, key, int64(index)) - } -} - -func (*etcdHelper) Count(pathPerfix string) (int64, error) { - return 0, fmt.Errorf("Count is unimplemented for etcd2!") -} - -// etcdCache defines interface used for caching objects stored in etcd. Objects are keyed by -// their Node.ModifiedIndex, which is unique across all types. -// All implementations must be thread-safe. -type etcdCache interface { - getFromCache(index uint64, pred storage.SelectionPredicate) (runtime.Object, bool) - addToCache(index uint64, obj runtime.Object) -} - -func getTypeName(obj interface{}) string { - return reflect.TypeOf(obj).String() -} - -func (h *etcdHelper) getFromCache(index uint64, pred storage.SelectionPredicate) (runtime.Object, bool) { - startTime := time.Now() - defer func() { - metrics.ObserveGetCache(startTime) - }() - obj, found := h.cache.Get(index) - if found { - if matched, err := pred.Matches(obj.(runtime.Object)); err != nil || !matched { - return nil, true - } - // We should not return the object itself to avoid polluting the cache if someone - // modifies returned values. 
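Both the deleted etcd2 GuaranteedUpdate above and its etcd3 counterpart later in this diff follow the same optimistic-concurrency shape: read the current revision, run tryUpdate, attempt a conditional write, and loop on conflict. A hedged, storage-agnostic sketch of that loop (all names are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conditional write failed: revision changed")

type revisioned struct {
	value string
	rev   int64
}

// guaranteedUpdate retries tryUpdate until a conditional write succeeds or a
// non-conflict error occurs, mirroring the loop structure in the hunks above.
func guaranteedUpdate(
	read func() (revisioned, error),
	writeIfUnchanged func(prev revisioned, next string) error,
	tryUpdate func(current string) (string, error),
) error {
	for {
		cur, err := read()
		if err != nil {
			return err
		}
		next, err := tryUpdate(cur.value)
		if err != nil {
			return err
		}
		err = writeIfUnchanged(cur, next)
		if err == errConflict {
			continue // someone else wrote first: re-read and retry
		}
		return err
	}
}

func main() {
	state := revisioned{value: "a", rev: 1}
	err := guaranteedUpdate(
		func() (revisioned, error) { return state, nil },
		func(prev revisioned, next string) error {
			if prev.rev != state.rev {
				return errConflict
			}
			state = revisioned{value: next, rev: prev.rev + 1}
			return nil
		},
		func(current string) (string, error) { return current + "b", nil },
	)
	fmt.Println(state.value, err) // "ab" <nil>
}
```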
- objCopy := obj.(runtime.Object).DeepCopyObject() - metrics.ObserveCacheHit() - return objCopy.(runtime.Object), true - } - metrics.ObserveCacheMiss() - return nil, false -} - -func (h *etcdHelper) addToCache(index uint64, obj runtime.Object) { - startTime := time.Now() - defer func() { - metrics.ObserveAddCache(startTime) - }() - objCopy := obj.DeepCopyObject() - isOverwrite := h.cache.Add(index, objCopy) - if !isOverwrite { - metrics.ObserveNewEntry() - } -} - -func toStorageErr(err error, key string, rv int64) error { - if err == nil { - return nil - } - switch { - case etcdutil.IsEtcdNotFound(err): - return storage.NewKeyNotFoundError(key, rv) - case etcdutil.IsEtcdNodeExist(err): - return storage.NewKeyExistsError(key, rv) - case etcdutil.IsEtcdTestFailed(err): - return storage.NewResourceVersionConflictsError(key, rv) - case etcdutil.IsEtcdUnreachable(err): - return storage.NewUnreachableError(key, rv) - default: - return err - } -} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go b/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go deleted file mode 100644 index d3a2e3c53adde..0000000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go +++ /dev/null @@ -1,496 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package etcd - -import ( - "context" - "fmt" - "net/http" - "reflect" - "sync" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/apiserver/pkg/storage" - etcdutil "k8s.io/apiserver/pkg/storage/etcd/util" - - etcd "github.com/coreos/etcd/client" - "github.com/golang/glog" -) - -// Etcd watch event actions -const ( - EtcdCreate = "create" - EtcdGet = "get" - EtcdSet = "set" - EtcdCAS = "compareAndSwap" - EtcdDelete = "delete" - EtcdCAD = "compareAndDelete" - EtcdExpire = "expire" -) - -// TransformFunc attempts to convert an object to another object for use with a watcher. -type TransformFunc func(runtime.Object) (runtime.Object, error) - -// includeFunc returns true if the given key should be considered part of a watch -type includeFunc func(key string) bool - -// exceptKey is an includeFunc that returns false when the provided key matches the watched key -func exceptKey(except string) includeFunc { - return func(key string) bool { - return key != except - } -} - -// etcdWatcher converts a native etcd watch to a watch.Interface. -type etcdWatcher struct { - // HighWaterMarks for performance debugging. - // Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms - // See: https://golang.org/pkg/sync/atomic/ for more information - incomingHWM storage.HighWaterMark - outgoingHWM storage.HighWaterMark - - encoding runtime.Codec - // Note that versioner is required for etcdWatcher to work correctly. - // There is no public constructor of it, so be careful when manipulating - // with it manually. 
- versioner storage.Versioner - transform TransformFunc - valueTransformer ValueTransformer - - list bool // If we're doing a recursive watch, should be true. - quorum bool // If we enable quorum, should be true - include includeFunc - pred storage.SelectionPredicate - - etcdIncoming chan *etcd.Response - etcdError chan error - ctx context.Context - cancel context.CancelFunc - etcdCallEnded chan struct{} - - outgoing chan watch.Event - userStop chan struct{} - stopped bool - stopLock sync.Mutex - // wg is used to avoid calls to etcd after Stop(), and to make sure - // that the translate goroutine is not leaked. - wg sync.WaitGroup - - // Injectable for testing. Send the event down the outgoing channel. - emit func(watch.Event) - - cache etcdCache -} - -// newEtcdWatcher returns a new etcdWatcher; if list is true, watch sub-nodes. -// The versioner must be able to handle the objects that transform creates. -func newEtcdWatcher(list bool, quorum bool, include includeFunc, pred storage.SelectionPredicate, - encoding runtime.Codec, versioner storage.Versioner, transform TransformFunc, - valueTransformer ValueTransformer, cache etcdCache) *etcdWatcher { - w := &etcdWatcher{ - encoding: encoding, - versioner: versioner, - transform: transform, - valueTransformer: valueTransformer, - - list: list, - quorum: quorum, - include: include, - pred: pred, - // Buffer this channel, so that the etcd client is not forced - // to context switch with every object it gets, and so that a - // long time spent decoding an object won't block the *next* - // object. Basically, we see a lot of "401 window exceeded" - // errors from etcd, and that's due to the client not streaming - // results but rather getting them one at a time. So we really - // want to never block the etcd client, if possible. The 100 is - // mostly arbitrary--we know it goes as high as 50, though. - // There's a V(2) log message that prints the length so we can - // monitor how much of this buffer is actually used. - etcdIncoming: make(chan *etcd.Response, 100), - etcdError: make(chan error, 1), - // Similarly to etcdIncomming, we don't want to force context - // switch on every new incoming object. - outgoing: make(chan watch.Event, 100), - userStop: make(chan struct{}), - stopped: false, - wg: sync.WaitGroup{}, - cache: cache, - ctx: nil, - cancel: nil, - } - w.emit = func(e watch.Event) { - if curLen := int64(len(w.outgoing)); w.outgoingHWM.Update(curLen) { - // Monitor if this gets backed up, and how much. - glog.V(1).Infof("watch (%v): %v objects queued in outgoing channel.", reflect.TypeOf(e.Object).String(), curLen) - } - // Give up on user stop, without this we leak a lot of goroutines in tests. - select { - case w.outgoing <- e: - case <-w.userStop: - } - } - // translate will call done. We need to Add() here because otherwise, - // if Stop() gets called before translate gets started, there'd be a - // problem. - w.wg.Add(1) - go w.translate() - return w -} - -// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called -// as a goroutine. -func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) { - defer utilruntime.HandleCrash() - defer close(w.etcdError) - defer close(w.etcdIncoming) - - // All calls to etcd are coming from this function - once it is finished - // no other call to etcd should be generated by this watcher. - done := func() {} - - // We need to be prepared, that Stop() can be called at any time. 
- // It can potentially also be called, even before this function is called. - // If that is the case, we simply skip all the code here. - // See #18928 for more details. - var watcher etcd.Watcher - returned := func() bool { - w.stopLock.Lock() - defer w.stopLock.Unlock() - if w.stopped { - // Watcher has already been stopped - don't event initiate it here. - return true - } - w.wg.Add(1) - done = w.wg.Done - // Perform initialization of watcher under lock - we want to avoid situation when - // Stop() is called in the meantime (which in tests can cause etcd termination and - // strange behavior here). - if resourceVersion == 0 { - latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.quorum, w.etcdIncoming) - if err != nil { - w.etcdError <- err - return true - } - resourceVersion = latest - } - - opts := etcd.WatcherOptions{ - Recursive: w.list, - AfterIndex: resourceVersion, - } - watcher = client.Watcher(key, &opts) - w.ctx, w.cancel = context.WithCancel(ctx) - return false - }() - defer done() - if returned { - return - } - - for { - resp, err := watcher.Next(w.ctx) - if err != nil { - w.etcdError <- err - return - } - w.etcdIncoming <- resp - } -} - -// etcdGetInitialWatchState turns an etcd Get request into a watch equivalent -func etcdGetInitialWatchState(ctx context.Context, client etcd.KeysAPI, key string, recursive bool, quorum bool, incoming chan<- *etcd.Response) (resourceVersion uint64, err error) { - opts := etcd.GetOptions{ - Recursive: recursive, - Sort: false, - Quorum: quorum, - } - resp, err := client.Get(ctx, key, &opts) - if err != nil { - if !etcdutil.IsEtcdNotFound(err) { - utilruntime.HandleError(fmt.Errorf("watch was unable to retrieve the current index for the provided key (%q): %v", key, err)) - return resourceVersion, toStorageErr(err, key, 0) - } - if etcdError, ok := err.(etcd.Error); ok { - resourceVersion = etcdError.Index - } - return resourceVersion, nil - } - resourceVersion = resp.Index - convertRecursiveResponse(resp.Node, resp, incoming) - return -} - -// convertRecursiveResponse turns a recursive get response from etcd into individual response objects -// by copying the original response. This emulates the behavior of a recursive watch. -func convertRecursiveResponse(node *etcd.Node, response *etcd.Response, incoming chan<- *etcd.Response) { - if node.Dir { - for i := range node.Nodes { - convertRecursiveResponse(node.Nodes[i], response, incoming) - } - return - } - copied := *response - copied.Action = "get" - copied.Node = node - incoming <- &copied -} - -// translate pulls stuff from etcd, converts, and pushes out the outgoing channel. Meant to be -// called as a goroutine. -func (w *etcdWatcher) translate() { - defer w.wg.Done() - defer close(w.outgoing) - defer utilruntime.HandleCrash() - - for { - select { - case err := <-w.etcdError: - if err != nil { - var status *metav1.Status - switch { - case etcdutil.IsEtcdWatchExpired(err): - status = &metav1.Status{ - Status: metav1.StatusFailure, - Message: err.Error(), - Code: http.StatusGone, // Gone - Reason: metav1.StatusReasonExpired, - } - // TODO: need to generate errors using api/errors which has a circular dependency on this package - // no other way to inject errors - // case etcdutil.IsEtcdUnreachable(err): - // status = errors.NewServerTimeout(...) 
- default: - status = &metav1.Status{ - Status: metav1.StatusFailure, - Message: err.Error(), - Code: http.StatusInternalServerError, - Reason: metav1.StatusReasonInternalError, - } - } - w.emit(watch.Event{ - Type: watch.Error, - Object: status, - }) - } - return - case <-w.userStop: - return - case res, ok := <-w.etcdIncoming: - if ok { - if curLen := int64(len(w.etcdIncoming)); w.incomingHWM.Update(curLen) { - // Monitor if this gets backed up, and how much. - glog.V(1).Infof("watch: %v objects queued in incoming channel.", curLen) - } - w.sendResult(res) - } - // If !ok, don't return here-- must wait for etcdError channel - // to give an error or be closed. - } - } -} - -// decodeObject extracts an object from the provided etcd node or returns an error. -func (w *etcdWatcher) decodeObject(node *etcd.Node) (runtime.Object, error) { - if obj, found := w.cache.getFromCache(node.ModifiedIndex, storage.Everything); found { - return obj, nil - } - - body, _, err := w.valueTransformer.TransformStringFromStorage(node.Value) - if err != nil { - return nil, err - } - - obj, err := runtime.Decode(w.encoding, []byte(body)) - if err != nil { - return nil, err - } - - // ensure resource version is set on the object we load from etcd - if err := w.versioner.UpdateObject(obj, node.ModifiedIndex); err != nil { - utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", node.ModifiedIndex, obj, err)) - } - - // perform any necessary transformation - if w.transform != nil { - obj, err = w.transform(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failure to transform api object %#v: %v", obj, err)) - return nil, err - } - } - - if node.ModifiedIndex != 0 { - w.cache.addToCache(node.ModifiedIndex, obj) - } - return obj, nil -} - -func (w *etcdWatcher) sendAdd(res *etcd.Response) { - if res.Node == nil { - utilruntime.HandleError(fmt.Errorf("unexpected nil node: %#v", res)) - return - } - if w.include != nil && !w.include(res.Node.Key) { - return - } - obj, err := w.decodeObject(res.Node) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\n'%v' from %#v %#v", err, string(res.Node.Value), res, res.Node)) - // TODO: expose an error through watch.Interface? - // Ignore this value. If we stop the watch on a bad value, a client that uses - // the resourceVersion to resume will never be able to get past a bad value. - return - } - if matched, err := w.pred.Matches(obj); err != nil || !matched { - return - } - action := watch.Added - w.emit(watch.Event{ - Type: action, - Object: obj, - }) -} - -func (w *etcdWatcher) sendModify(res *etcd.Response) { - if res.Node == nil { - glog.Errorf("unexpected nil node: %#v", res) - return - } - if w.include != nil && !w.include(res.Node.Key) { - return - } - curObj, err := w.decodeObject(res.Node) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\n'%v' from %#v %#v", err, string(res.Node.Value), res, res.Node)) - // TODO: expose an error through watch.Interface? - // Ignore this value. If we stop the watch on a bad value, a client that uses - // the resourceVersion to resume will never be able to get past a bad value. - return - } - curObjPasses := false - if matched, err := w.pred.Matches(curObj); err == nil && matched { - curObjPasses = true - } - oldObjPasses := false - var oldObj runtime.Object - if res.PrevNode != nil && res.PrevNode.Value != "" { - // Ignore problems reading the old object. 
- if oldObj, err = w.decodeObject(res.PrevNode); err == nil { - if err := w.versioner.UpdateObject(oldObj, res.Node.ModifiedIndex); err != nil { - utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", res.Node.ModifiedIndex, oldObj, err)) - } - if matched, err := w.pred.Matches(oldObj); err == nil && matched { - oldObjPasses = true - } - } - } - // Some changes to an object may cause it to start or stop matching a pred. - // We need to report those as adds/deletes. So we have to check both the previous - // and current value of the object. - switch { - case curObjPasses && oldObjPasses: - w.emit(watch.Event{ - Type: watch.Modified, - Object: curObj, - }) - case curObjPasses && !oldObjPasses: - w.emit(watch.Event{ - Type: watch.Added, - Object: curObj, - }) - case !curObjPasses && oldObjPasses: - w.emit(watch.Event{ - Type: watch.Deleted, - Object: oldObj, - }) - } - // Do nothing if neither new nor old object passed the pred. -} - -func (w *etcdWatcher) sendDelete(res *etcd.Response) { - if res.PrevNode == nil { - utilruntime.HandleError(fmt.Errorf("unexpected nil prev node: %#v", res)) - return - } - if w.include != nil && !w.include(res.PrevNode.Key) { - return - } - node := *res.PrevNode - if res.Node != nil { - // Note that this sends the *old* object with the etcd index for the time at - // which it gets deleted. This will allow users to restart the watch at the right - // index. - node.ModifiedIndex = res.Node.ModifiedIndex - } - obj, err := w.decodeObject(&node) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\nfrom %#v %#v", err, res, res.Node)) - // TODO: expose an error through watch.Interface? - // Ignore this value. If we stop the watch on a bad value, a client that uses - // the resourceVersion to resume will never be able to get past a bad value. - return - } - if matched, err := w.pred.Matches(obj); err != nil || !matched { - return - } - w.emit(watch.Event{ - Type: watch.Deleted, - Object: obj, - }) -} - -func (w *etcdWatcher) sendResult(res *etcd.Response) { - switch res.Action { - case EtcdCreate, EtcdGet: - // "Get" will only happen in watch 0 case, where we explicitly want ADDED event - // for initial state. - w.sendAdd(res) - case EtcdSet, EtcdCAS: - w.sendModify(res) - case EtcdDelete, EtcdExpire, EtcdCAD: - w.sendDelete(res) - default: - utilruntime.HandleError(fmt.Errorf("unknown action: %v", res.Action)) - } -} - -// ResultChan implements watch.Interface. -func (w *etcdWatcher) ResultChan() <-chan watch.Event { - return w.outgoing -} - -// Stop implements watch.Interface. -func (w *etcdWatcher) Stop() { - w.stopLock.Lock() - if w.cancel != nil { - w.cancel() - w.cancel = nil - } - if !w.stopped { - w.stopped = true - close(w.userStop) - } - w.stopLock.Unlock() - - // Wait until all calls to etcd are finished and no other - // will be issued. 
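sendModify in the deleted watcher above decides the event type from whether the previous and current object match the selection predicate; the cacher applies the same rule. A hedged restatement of just that transition table (names are illustrative):

```go
package main

import "fmt"

type eventType string

const (
	added    eventType = "ADDED"
	modified eventType = "MODIFIED"
	deleted  eventType = "DELETED"
	none     eventType = ""
)

// classify mirrors the switch in sendModify: a change that starts or stops
// matching the predicate is reported as an add or delete, not a modify.
func classify(oldPasses, curPasses bool) eventType {
	switch {
	case curPasses && oldPasses:
		return modified
	case curPasses && !oldPasses:
		return added
	case !curPasses && oldPasses:
		return deleted
	default:
		return none // neither version matches the predicate: drop the event
	}
}

func main() {
	fmt.Println(classify(true, true))  // MODIFIED
	fmt.Println(classify(false, true)) // ADDED
	fmt.Println(classify(true, false)) // DELETED
}
```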
- w.wg.Wait() -} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD.bazel deleted file mode 100644 index 3b56905f5f81f..0000000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "etcd_util.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/storage/etcd/util", - importpath = "k8s.io/apiserver/pkg/storage/etcd/util", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/etcd/client:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/util/doc.go b/vendor/k8s.io/apiserver/pkg/storage/etcd/util/doc.go deleted file mode 100644 index 97241a44f19e2..0000000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/util/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package util holds generic etcd-related utility functions that any user of ectd might want to -// use, without pulling in kubernetes-specific code. -package util // import "k8s.io/apiserver/pkg/storage/etcd/util" diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/util/etcd_util.go b/vendor/k8s.io/apiserver/pkg/storage/etcd/util/etcd_util.go deleted file mode 100644 index 7c71fe24fc53f..0000000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/util/etcd_util.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - - etcd "github.com/coreos/etcd/client" -) - -// IsEtcdNotFound returns true if and only if err is an etcd not found error. -func IsEtcdNotFound(err error) bool { - return isEtcdErrorNum(err, etcd.ErrorCodeKeyNotFound) -} - -// IsEtcdNodeExist returns true if and only if err is an etcd node already exist error. -func IsEtcdNodeExist(err error) bool { - return isEtcdErrorNum(err, etcd.ErrorCodeNodeExist) -} - -// IsEtcdTestFailed returns true if and only if err is an etcd write conflict. -func IsEtcdTestFailed(err error) bool { - return isEtcdErrorNum(err, etcd.ErrorCodeTestFailed) -} - -// IsEtcdWatchExpired returns true if and only if err indicates the watch has expired. 
-func IsEtcdWatchExpired(err error) bool { - // NOTE: This seems weird why it wouldn't be etcd.ErrorCodeWatcherCleared - // I'm using the previous matching value - return isEtcdErrorNum(err, etcd.ErrorCodeEventIndexCleared) -} - -// IsEtcdUnreachable returns true if and only if err indicates the server could not be reached. -func IsEtcdUnreachable(err error) bool { - // NOTE: The logic has changed previous error code no longer applies - return err == etcd.ErrClusterUnavailable -} - -// isEtcdErrorNum returns true if and only if err is an etcd error, whose errorCode matches errorCode -func isEtcdErrorNum(err error, errorCode int) bool { - if err != nil { - if etcdError, ok := err.(etcd.Error); ok { - return etcdError.Code == errorCode - } - // NOTE: There are other error types returned - } - return false -} - -// GetEtcdVersion performs a version check against the provided Etcd server, -// returning the string response, and error (if any). -func GetEtcdVersion(host string) (string, error) { - response, err := http.Get(host + "/version") - if err != nil { - return "", err - } - defer response.Body.Close() - if response.StatusCode != http.StatusOK { - return "", fmt.Errorf("unsuccessful response from etcd server %q: %v", host, err) - } - versionBytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return "", err - } - return string(versionBytes), nil -} - -type etcdHealth struct { - // Note this has to be public so the json library can modify it. - Health string `json:"health"` -} - -func EtcdHealthCheck(data []byte) error { - obj := etcdHealth{} - if err := json.Unmarshal(data, &obj); err != nil { - return err - } - if obj.Health != "true" { - return fmt.Errorf("Unhealthy status: %s", obj.Health) - } - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD.bazel index 7bc520148ddf7..edda4cecb20e5 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD.bazel @@ -17,7 +17,6 @@ go_library( "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", @@ -28,5 +27,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/storage/etcd:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go index bdcd5bcb60db4..d4524f4922111 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go @@ -23,7 +23,7 @@ import ( "time" "github.com/coreos/etcd/clientv3" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -51,7 +51,7 @@ func StartCompactor(ctx context.Context, client *clientv3.Client, compactInterva // Currently we rely on endpoints to differentiate clusters. 
for _, ep := range client.Endpoints() { if _, ok := endpointsMap[ep]; ok { - glog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints()) + klog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints()) return } } @@ -121,7 +121,7 @@ func compactor(ctx context.Context, client *clientv3.Client, interval time.Durat compactTime, rev, err = compact(ctx, client, compactTime, rev) if err != nil { - glog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err) + klog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err) continue } } @@ -157,6 +157,6 @@ func compact(ctx context.Context, client *clientv3.Client, t, rev int64) (int64, if _, err = client.Compact(ctx, rev); err != nil { return curTime, curRev, err } - glog.V(4).Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints()) + klog.V(4).Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints()) return curTime, curRev, nil } diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go index dc06ac52077b2..e7e554c6270a2 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go @@ -51,7 +51,7 @@ func newDefaultLeaseManager(client *clientv3.Client) *leaseManager { // value x means x*100%. func newLeaseManager(client *clientv3.Client, leaseReuseDurationSeconds int64, leaseReuseDurationPercent float64) *leaseManager { return &leaseManager{ - client: client, + client: client, leaseReuseDurationSeconds: leaseReuseDurationSeconds, leaseReuseDurationPercent: leaseReuseDurationPercent, } diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index d8aa8b2fd37c0..129b593f53b66 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -29,13 +29,12 @@ import ( "time" "github.com/coreos/etcd/clientv3" - "github.com/golang/glog" + "k8s.io/klog" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd" @@ -83,16 +82,10 @@ type objState struct { // New returns an etcd3 implementation of storage.Interface. func New(c *clientv3.Client, codec runtime.Codec, prefix string, transformer value.Transformer, pagingEnabled bool) storage.Interface { - return newStore(c, true, pagingEnabled, codec, prefix, transformer) + return newStore(c, pagingEnabled, codec, prefix, transformer) } -// NewWithNoQuorumRead returns etcd3 implementation of storage.Interface -// where Get operations don't require quorum read. 
-func NewWithNoQuorumRead(c *clientv3.Client, codec runtime.Codec, prefix string, transformer value.Transformer, pagingEnabled bool) storage.Interface { - return newStore(c, false, pagingEnabled, codec, prefix, transformer) -} - -func newStore(c *clientv3.Client, quorumRead, pagingEnabled bool, codec runtime.Codec, prefix string, transformer value.Transformer) *store { +func newStore(c *clientv3.Client, pagingEnabled bool, codec runtime.Codec, prefix string, transformer value.Transformer) *store { versioner := etcd.APIObjectVersioner{} result := &store{ client: c, @@ -107,11 +100,6 @@ func newStore(c *clientv3.Client, quorumRead, pagingEnabled bool, codec runtime. watcher: newWatcher(c, codec, versioner, transformer), leaseManager: newDefaultLeaseManager(c), } - if !quorumRead { - // In case of non-quorum reads, we can set WithSerializable() - // options for all Get operations. - result.getOps = append(result.getOps, clientv3.WithSerializable()) - } return result } @@ -248,7 +236,7 @@ func (s *store) conditionalDelete(ctx context.Context, key string, out runtime.O } if !txnResp.Succeeded { getResp = (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange()) - glog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key) + klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key) continue } return decode(s.codec, s.versioner, origState.data, out, origState.rev) @@ -364,7 +352,7 @@ func (s *store) GuaranteedUpdate( trace.Step("Transaction committed") if !txnResp.Succeeded { getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange()) - glog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key) + klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key) origState, err = s.getState(getResp, key, v, ignoreNotFound) if err != nil { return err @@ -594,8 +582,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key)) if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to transform key %q: %v", kv.Key, err)) - continue + return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err) } if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner); err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index c1216d5884cdd..d450038eff7a8 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -32,7 +32,7 @@ import ( "k8s.io/apiserver/pkg/storage/value" "github.com/coreos/etcd/clientv3" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -191,7 +191,7 @@ func (wc *watchChan) sync() error { func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { if wc.initialRev == 0 { if err := wc.sync(); err != nil { - glog.Errorf("failed to sync with latest state: %v", err) + klog.Errorf("failed to sync with latest state: %v", err) wc.sendError(err) return } @@ -205,7 +205,7 @@ func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { if wres.Err() != nil { err := wres.Err() // If there is an error on server (e.g. compaction), the channel will return it before closed. 
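// NOTE (editor sketch): with NewWithNoQuorumRead and the quorumRead flag removed
// above, every Get issued by the etcd3 store uses clientv3's default linearizable
// (quorum) read; the deleted branch opted out by appending clientv3.WithSerializable().
// A caller who still wants a serializable read would request it directly, e.g.
// (endpoint and key are hypothetical):

package main

import (
	"context"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Default: linearizable read, confirmed through the cluster leader.
	if _, err := cli.Get(ctx, "/registry/example"); err != nil {
		panic(err)
	}

	// Opt-in serializable read: cheaper, but may return slightly stale data.
	if _, err := cli.Get(ctx, "/registry/example", clientv3.WithSerializable()); err != nil {
		panic(err)
	}
}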
- glog.Errorf("watch chan error: %v", err) + klog.Errorf("watch chan error: %v", err) wc.sendError(err) return } @@ -232,7 +232,7 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) { continue } if len(wc.resultChan) == outgoingBufSize { - glog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ + klog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ "Probably caused by slow dispatching events to watchers", outgoingBufSize) } // If user couldn't receive results fast enough, we also block incoming events from watcher. @@ -265,7 +265,7 @@ func (wc *watchChan) acceptAll() bool { func (wc *watchChan) transform(e *event) (res *watch.Event) { curObj, oldObj, err := wc.prepareObjs(e) if err != nil { - glog.Errorf("failed to prepare current and previous objects: %v", err) + klog.Errorf("failed to prepare current and previous objects: %v", err) wc.sendError(err) return nil } @@ -339,7 +339,7 @@ func (wc *watchChan) sendError(err error) { func (wc *watchChan) sendEvent(e *event) { if len(wc.incomingEventChan) == incomingBufSize { - glog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ + klog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ "Probably caused by slow decoding, user not receiving fast, or other processing logic", incomingBufSize) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go index 8d7ecf37c6820..f18ac76dd9fbe 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go @@ -25,7 +25,6 @@ import ( const ( StorageTypeUnset = "" - StorageTypeETCD2 = "etcd2" StorageTypeETCD3 = "etcd3" DefaultCompactInterval = 5 * time.Minute @@ -33,7 +32,7 @@ const ( // Config is configuration for creating a storage backend. type Config struct { - // Type defines the type of storage backend, e.g. "etcd2", etcd3". Default ("") is "etcd3". + // Type defines the type of storage backend. Default ("") is "etcd3". Type string // Prefix is the prefix to all keys passed to storage.Interface methods. Prefix string @@ -43,17 +42,11 @@ type Config struct { KeyFile string CertFile string CAFile string - // Quorum indicates that whether read operations should be quorum-level consistent. - Quorum bool // Paging indicates whether the server implementation should allow paging (if it is // supported). This is generally configured by feature gating, or by a specific // resource type not wishing to allow paging, and is not intended for end users to // set. Paging bool - // DeserializationCacheSize is the size of cache of deserialized objects. - // Currently this is only supported in etcd2. - // We will drop the cache once using protobuf. - DeserializationCacheSize int Codec runtime.Codec // Transformer allows the value to be transformed prior to persisting into etcd. @@ -69,12 +62,8 @@ type Config struct { func NewDefaultConfig(prefix string, codec runtime.Codec) *Config { return &Config{ - Prefix: prefix, - // Default cache size to 0 - if unset, its size will be set based on target - // memory usage. 
- DeserializationCacheSize: 0, + Prefix: prefix, Codec: codec, CompactionInterval: DefaultCompactInterval, - Quorum: true, } } diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD.bazel index b714863225a2d..d103beac22559 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD.bazel @@ -3,7 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "etcd2.go", "etcd3.go", "factory.go", ], @@ -11,15 +10,12 @@ go_library( importpath = "k8s.io/apiserver/pkg/storage/storagebackend/factory", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", "//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd3:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd2.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd2.go deleted file mode 100644 index 292553a17e2ad..0000000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd2.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package factory - -import ( - "context" - "fmt" - "net" - "net/http" - "time" - - etcd2client "github.com/coreos/etcd/client" - "github.com/coreos/etcd/pkg/transport" - - utilnet "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apiserver/pkg/storage" - "k8s.io/apiserver/pkg/storage/etcd" - "k8s.io/apiserver/pkg/storage/storagebackend" -) - -func newETCD2HealthCheck(c storagebackend.Config) (func() error, error) { - tr, err := newTransportForETCD2(c.CertFile, c.KeyFile, c.CAFile) - if err != nil { - return nil, err - } - - client, err := newETCD2Client(tr, c.ServerList) - if err != nil { - return nil, err - } - - members := etcd2client.NewMembersAPI(client) - - return func() error { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - if _, err := members.List(ctx); err != nil { - return fmt.Errorf("error listing etcd members: %v", err) - } - return nil - }, nil -} - -func newETCD2Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) { - tr, err := newTransportForETCD2(c.CertFile, c.KeyFile, c.CAFile) - if err != nil { - return nil, nil, err - } - client, err := newETCD2Client(tr, c.ServerList) - if err != nil { - return nil, nil, err - } - s := etcd.NewEtcdStorage(client, c.Codec, c.Prefix, c.Quorum, c.DeserializationCacheSize, etcd.IdentityTransformer) - return s, tr.CloseIdleConnections, nil -} - -func newETCD2Client(tr *http.Transport, serverList []string) (etcd2client.Client, error) { - cli, err := etcd2client.New(etcd2client.Config{ - Endpoints: serverList, - Transport: tr, - }) - if err != nil { - return nil, err - } - - return cli, nil -} - -func newTransportForETCD2(certFile, keyFile, caFile string) (*http.Transport, error) { - info := transport.TLSInfo{ - CertFile: certFile, - KeyFile: keyFile, - CAFile: caFile, - } - cfg, err := info.ClientConfig() - if err != nil { - return nil, err - } - // Copied from etcd.DefaultTransport declaration. 
- // TODO: Determine if transport needs optimization - tr := utilnet.SetTransportDefaults(&http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - TLSHandshakeTimeout: 10 * time.Second, - MaxIdleConnsPerHost: 500, - TLSClientConfig: cfg, - }) - return tr, nil -} diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index cce71d216aba5..e18fe9acde7ec 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -104,8 +104,8 @@ func newETCD3Client(c storagebackend.Config) (*clientv3.Client, error) { Endpoints: c.ServerList, TLS: tlsConfig, } - client, err := clientv3.New(cfg) - return client, err + + return clientv3.New(cfg) } func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) { @@ -123,8 +123,5 @@ func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, e if transformer == nil { transformer = value.IdentityTransformer } - if c.Quorum { - return etcd3.New(client, c.Codec, c.Prefix, transformer, c.Paging), destroyFunc, nil - } - return etcd3.NewWithNoQuorumRead(client, c.Codec, c.Prefix, transformer, c.Paging), destroyFunc, nil + return etcd3.New(client, c.Codec, c.Prefix, transformer, c.Paging), destroyFunc, nil } diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go index bba1fa209a0e0..a1dc6c0fa89a0 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go @@ -29,13 +29,9 @@ type DestroyFunc func() // Create creates a storage backend based on given config. func Create(c storagebackend.Config) (storage.Interface, DestroyFunc, error) { switch c.Type { - case storagebackend.StorageTypeETCD2: - return newETCD2Storage(c) + case "etcd2": + return nil, nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type) case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3: - // TODO: We have the following features to implement: - // - Support secure connection by using key, cert, and CA files. - // - Honor "https" scheme to support secure connection in gRPC. - // - Support non-quorum read. return newETCD3Storage(c) default: return nil, nil, fmt.Errorf("unknown storage type: %s", c.Type) @@ -45,8 +41,8 @@ func Create(c storagebackend.Config) (storage.Interface, DestroyFunc, error) { // CreateHealthCheck creates a healthcheck function based on given config. 
func CreateHealthCheck(c storagebackend.Config) (func() error, error) { switch c.Type { - case storagebackend.StorageTypeETCD2: - return newETCD2HealthCheck(c) + case "etcd2": + return nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type) case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3: return newETCD3HealthCheck(c) default: diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel index 588434122223d..e767b7ea22406 100644 --- a/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel @@ -7,7 +7,7 @@ go_library( importpath = "k8s.io/apiserver/pkg/util/feature", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go index 8847c1fb62546..a83dafd56abe8 100644 --- a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go +++ b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -24,8 +24,8 @@ import ( "sync" "sync/atomic" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" ) type Feature string @@ -193,9 +193,9 @@ func (f *featureGate) SetFromMap(m map[string]bool) error { } if featureSpec.PreRelease == Deprecated { - glog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) + klog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) } else if featureSpec.PreRelease == GA { - glog.Warningf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v) + klog.Warningf("Setting GA feature gate %s=%t. 
It will be removed in a future release.", k, v) } } @@ -203,7 +203,7 @@ func (f *featureGate) SetFromMap(m map[string]bool) error { f.known.Store(known) f.enabled.Store(enabled) - glog.V(1).Infof("feature gates: %v", f.enabled) + klog.V(1).Infof("feature gates: %v", f.enabled) return nil } diff --git a/vendor/k8s.io/apiserver/pkg/util/flag/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/flag/BUILD.bazel index 0281d1ca98595..07ea773340922 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flag/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/util/flag/BUILD.bazel @@ -22,8 +22,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/docker/docker/pkg/term:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/util/flag/flags.go b/vendor/k8s.io/apiserver/pkg/util/flag/flags.go index 55a3ed34a8e70..d0fff8db2e49e 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flag/flags.go +++ b/vendor/k8s.io/apiserver/pkg/util/flag/flags.go @@ -20,8 +20,8 @@ import ( goflag "flag" "strings" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" ) // WordSepNormalizeFunc changes all flags that contain "_" separators @@ -36,7 +36,7 @@ func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { if strings.Contains(name, "_") { nname := strings.Replace(name, "_", "-", -1) - glog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) + klog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) return pflag.NormalizedName(nname) } @@ -49,6 +49,6 @@ func InitFlags() { pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) pflag.Parse() pflag.VisitAll(func(flag *pflag.Flag) { - glog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) }) } diff --git a/vendor/k8s.io/apiserver/pkg/util/logs/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/logs/BUILD.bazel index 3a29fa477118a..60ac217062092 100644 --- a/vendor/k8s.io/apiserver/pkg/util/logs/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/util/logs/BUILD.bazel @@ -7,8 +7,8 @@ go_library( importpath = "k8s.io/apiserver/pkg/util/logs", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/util/logs/logs.go b/vendor/k8s.io/apiserver/pkg/util/logs/logs.go index a62c06094dd77..3ffe9eeb29b7c 100644 --- a/vendor/k8s.io/apiserver/pkg/util/logs/logs.go +++ b/vendor/k8s.io/apiserver/pkg/util/logs/logs.go @@ -22,9 +22,9 @@ import ( "log" "time" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" ) const logFlushFreqFlagName = "log-flush-frequency" @@ -33,6 +33,7 @@ var logFlushFreq = pflag.Duration(logFlushFreqFlagName, 5*time.Second, "Maximum // TODO(thockin): This is temporary until we agree on log dirs and put those into each cmd. 
func init() { + klog.InitFlags(flag.CommandLine) flag.Set("logtostderr", "true") } @@ -42,38 +43,38 @@ func AddFlags(fs *pflag.FlagSet) { fs.AddFlag(pflag.Lookup(logFlushFreqFlagName)) } -// GlogWriter serves as a bridge between the standard log package and the glog package. -type GlogWriter struct{} +// KlogWriter serves as a bridge between the standard log package and the glog package. +type KlogWriter struct{} // Write implements the io.Writer interface. -func (writer GlogWriter) Write(data []byte) (n int, err error) { - glog.InfoDepth(1, string(data)) +func (writer KlogWriter) Write(data []byte) (n int, err error) { + klog.InfoDepth(1, string(data)) return len(data), nil } // InitLogs initializes logs the way we want for kubernetes. func InitLogs() { - log.SetOutput(GlogWriter{}) + log.SetOutput(KlogWriter{}) log.SetFlags(0) // The default glog flush interval is 5 seconds. - go wait.Forever(glog.Flush, *logFlushFreq) + go wait.Forever(klog.Flush, *logFlushFreq) } // FlushLogs flushes logs immediately. func FlushLogs() { - glog.Flush() + klog.Flush() } -// NewLogger creates a new log.Logger which sends logs to glog.Info. +// NewLogger creates a new log.Logger which sends logs to klog.Info. func NewLogger(prefix string) *log.Logger { - return log.New(GlogWriter{}, prefix, 0) + return log.New(KlogWriter{}, prefix, 0) } // GlogSetter is a setter to set glog level. func GlogSetter(val string) (string, error) { - var level glog.Level + var level klog.Level if err := level.Set(val); err != nil { - return "", fmt.Errorf("failed set glog.logging.verbosity %s: %v", val, err) + return "", fmt.Errorf("failed set klog.logging.verbosity %s: %v", val, err) } - return fmt.Sprintf("successfully set glog.logging.verbosity to %s", val), nil + return fmt.Sprintf("successfully set klog.logging.verbosity to %s", val), nil } diff --git a/vendor/k8s.io/apiserver/pkg/util/openapi/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/openapi/BUILD.bazel index 192f42a03984a..29f5acf6afe28 100644 --- a/vendor/k8s.io/apiserver/pkg/util/openapi/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/util/openapi/BUILD.bazel @@ -11,7 +11,6 @@ go_library( "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/googleapis/gnostic/compiler:go_default_library", "//vendor/gopkg.in/yaml.v2:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/util/openapi/proto.go b/vendor/k8s.io/apiserver/pkg/util/openapi/proto.go index 5641d1a141f6b..ba51ba5329b14 100644 --- a/vendor/k8s.io/apiserver/pkg/util/openapi/proto.go +++ b/vendor/k8s.io/apiserver/pkg/util/openapi/proto.go @@ -18,29 +18,17 @@ package openapi import ( "encoding/json" - "fmt" "github.com/go-openapi/spec" openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" "github.com/googleapis/gnostic/compiler" yaml "gopkg.in/yaml.v2" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kube-openapi/pkg/util/proto" ) -const ( - // groupVersionKindExtensionKey is the key used to lookup the - // GroupVersionKind value for an object definition from the - // definition's "extensions" map. 
- groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" -) - -// ToProtoSchema builds the proto formatted schema from an OpenAPI spec -func ToProtoSchema(openAPIDefinitions *spec.Definitions, gvk schema.GroupVersionKind) (proto.Schema, error) { - openAPISpec := newMinimalValidOpenAPISpec() - openAPISpec.Definitions = *openAPIDefinitions - +// ToProtoModels builds the proto formatted models from OpenAPI spec +func ToProtoModels(openAPISpec *spec.Swagger) (proto.Models, error) { specBytes, err := json.MarshalIndent(openAPISpec, " ", " ") if err != nil { return nil, err @@ -62,81 +50,5 @@ func ToProtoSchema(openAPIDefinitions *spec.Definitions, gvk schema.GroupVersion return nil, err } - for _, modelName := range models.ListModels() { - model := models.LookupModel(modelName) - if model == nil { - return nil, fmt.Errorf("the ListModels function returned a model that can't be looked-up") - } - gvkList := parseGroupVersionKind(model) - for _, modelGVK := range gvkList { - if modelGVK == gvk { - return model, nil - } - } - } - - return nil, fmt.Errorf("no model found with a %v tag matching %v", groupVersionKindExtensionKey, gvk) -} - -// newMinimalValidOpenAPISpec creates a minimal openapi spec with only the required fields filled in -func newMinimalValidOpenAPISpec() *spec.Swagger { - return &spec.Swagger{ - SwaggerProps: spec.SwaggerProps{ - Swagger: "2.0", - Info: &spec.Info{ - InfoProps: spec.InfoProps{ - Title: "Kubernetes", - Version: "0.0.0", - }, - }, - }, - } -} - -// parseGroupVersionKind gets and parses GroupVersionKind from the extension. Returns empty if it doesn't have one. -func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind { - extensions := s.GetExtensions() - - gvkListResult := []schema.GroupVersionKind{} - - // Get the extensions - gvkExtension, ok := extensions[groupVersionKindExtensionKey] - if !ok { - return []schema.GroupVersionKind{} - } - - // gvk extension must be a list of at least 1 element. 
- gvkList, ok := gvkExtension.([]interface{}) - if !ok { - return []schema.GroupVersionKind{} - } - - for _, gvk := range gvkList { - // gvk extension list must be a map with group, version, and - // kind fields - gvkMap, ok := gvk.(map[interface{}]interface{}) - if !ok { - continue - } - group, ok := gvkMap["group"].(string) - if !ok { - continue - } - version, ok := gvkMap["version"].(string) - if !ok { - continue - } - kind, ok := gvkMap["kind"].(string) - if !ok { - continue - } - - gvkListResult = append(gvkListResult, schema.GroupVersionKind{ - Group: group, - Version: version, - Kind: kind, - }) - } - - return gvkListResult + return models, nil } diff --git a/vendor/k8s.io/apiserver/pkg/util/trace/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/trace/BUILD.bazel index 3b3cbca843f9f..b068dadb0f509 100644 --- a/vendor/k8s.io/apiserver/pkg/util/trace/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/util/trace/BUILD.bazel @@ -6,5 +6,5 @@ go_library( importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/util/trace", importpath = "k8s.io/apiserver/pkg/util/trace", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/util/trace/trace.go b/vendor/k8s.io/apiserver/pkg/util/trace/trace.go index b2f31c5275d1f..9049a17d0deeb 100644 --- a/vendor/k8s.io/apiserver/pkg/util/trace/trace.go +++ b/vendor/k8s.io/apiserver/pkg/util/trace/trace.go @@ -22,7 +22,7 @@ import ( "math/rand" "time" - "github.com/golang/glog" + "k8s.io/klog" ) type traceStep struct { @@ -63,17 +63,17 @@ func (t *Trace) logWithStepThreshold(stepThreshold time.Duration) { lastStepTime := t.startTime for _, step := range t.steps { stepDuration := step.stepTime.Sub(lastStepTime) - if stepThreshold == 0 || stepDuration > stepThreshold || glog.V(4) { + if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] %v\n", tracenum, step.stepTime.Sub(t.startTime), stepDuration, step.msg)) } lastStepTime = step.stepTime } stepDuration := endTime.Sub(lastStepTime) - if stepThreshold == 0 || stepDuration > stepThreshold || glog.V(4) { + if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] END\n", tracenum, endTime.Sub(t.startTime), stepDuration)) } - glog.Info(buffer.String()) + klog.Info(buffer.String()) } func (t *Trace) LogIfLong(threshold time.Duration) { diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD.bazel index 6fee5f345d8c0..b8705c3d9cf52 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD.bazel @@ -7,6 +7,7 @@ go_library( "client.go", "error.go", "serviceresolver.go", + "validation.go", "webhook.go", ], importmap = "k8s.io/kops/vendor/k8s.io/apiserver/pkg/util/webhook", @@ -14,12 +15,15 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/hashicorp/golang-lru:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go index f6d79dea36231..dd0f4e5e663cf 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -19,9 +19,11 @@ package webhook import ( "fmt" "io/ioutil" + "net/http" "strings" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -31,6 +33,37 @@ import ( // rest.Config generated by the resolver. type AuthenticationInfoResolverWrapper func(AuthenticationInfoResolver) AuthenticationInfoResolver +// NewDefaultAuthenticationInfoResolverWrapper builds a default authn resolver wrapper +func NewDefaultAuthenticationInfoResolverWrapper( + proxyTransport *http.Transport, + kubeapiserverClientConfig *rest.Config) AuthenticationInfoResolverWrapper { + + webhookAuthResolverWrapper := func(delegate AuthenticationInfoResolver) AuthenticationInfoResolver { + return &AuthenticationInfoResolverDelegator{ + ClientConfigForFunc: func(server string) (*rest.Config, error) { + if server == "kubernetes.default.svc" { + return kubeapiserverClientConfig, nil + } + return delegate.ClientConfigFor(server) + }, + ClientConfigForServiceFunc: func(serviceName, serviceNamespace string) (*rest.Config, error) { + if serviceName == "kubernetes" && serviceNamespace == corev1.NamespaceDefault { + return kubeapiserverClientConfig, nil + } + ret, err := delegate.ClientConfigForService(serviceName, serviceNamespace) + if err != nil { + return nil, err + } + if proxyTransport != nil && proxyTransport.DialContext != nil { + ret.Dial = proxyTransport.DialContext + } + return ret, err + }, + } + } + return webhookAuthResolverWrapper +} + // AuthenticationInfoResolver builds rest.Config base on the server or service // name and service namespace. 
type AuthenticationInfoResolver interface { @@ -144,6 +177,7 @@ func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Conf return nil, err } config.BearerToken = string(tokenBytes) + config.BearerTokenFile = configAuthInfo.TokenFile } if len(configAuthInfo.Impersonate) > 0 { config.Impersonate = rest.ImpersonationConfig{ diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/client.go b/vendor/k8s.io/apiserver/pkg/util/webhook/client.go index 228e9f482aab3..0766bcdeec67f 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/client.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/client.go @@ -66,14 +66,14 @@ func NewClientManager(gv schema.GroupVersion, addToSchemaFunc func(s *runtime.Sc if err != nil { return ClientManager{}, err } - admissionScheme := runtime.NewScheme() - if err := addToSchemaFunc(admissionScheme); err != nil { + hookScheme := runtime.NewScheme() + if err := addToSchemaFunc(hookScheme); err != nil { return ClientManager{}, err } return ClientManager{ cache: cache, negotiatedSerializer: serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{ - Serializer: serializer.NewCodecFactory(admissionScheme).LegacyCodec(gv), + Serializer: serializer.NewCodecFactory(hookScheme).LegacyCodec(gv), }), }, nil } diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/error.go b/vendor/k8s.io/apiserver/pkg/util/webhook/error.go index 9e3b55dfbfb7b..4701530205d98 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/error.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/error.go @@ -28,7 +28,7 @@ type ErrCallingWebhook struct { func (e *ErrCallingWebhook) Error() string { if e.Reason != nil { - return fmt.Sprintf("failed calling admission webhook %q: %v", e.WebhookName, e.Reason) + return fmt.Sprintf("failed calling webhook %q: %v", e.WebhookName, e.Reason) } - return fmt.Sprintf("failed calling admission webhook %q; no further details available", e.WebhookName) + return fmt.Sprintf("failed calling webhook %q; no further details available", e.WebhookName) } diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go b/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go new file mode 100644 index 0000000000000..2ddb2c09ab7a9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go @@ -0,0 +1,101 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateWebhookURL validates webhook's URL. 
+func ValidateWebhookURL(fldPath *field.Path, URL string, forceHttps bool) field.ErrorList { + var allErrors field.ErrorList + const form = "; desired format: https://host[/path]" + if u, err := url.Parse(URL); err != nil { + allErrors = append(allErrors, field.Required(fldPath, "url must be a valid URL: "+err.Error()+form)) + } else { + if forceHttps && u.Scheme != "https" { + allErrors = append(allErrors, field.Invalid(fldPath, u.Scheme, "'https' is the only allowed URL scheme"+form)) + } + if len(u.Host) == 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.Host, "host must be provided"+form)) + } + if u.User != nil { + allErrors = append(allErrors, field.Invalid(fldPath, u.User.String(), "user information is not permitted in the URL")) + } + if len(u.Fragment) != 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.Fragment, "fragments are not permitted in the URL")) + } + if len(u.RawQuery) != 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.RawQuery, "query parameters are not permitted in the URL")) + } + } + return allErrors +} + +func ValidateWebhookService(fldPath *field.Path, namespace, name string, path *string) field.ErrorList { + var allErrors field.ErrorList + + if len(name) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("name"), "service name is required")) + } + + if len(namespace) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("namespace"), "service namespace is required")) + } + + if path == nil { + return allErrors + } + + // TODO: replace below with url.Parse + verifying that host is empty? + + urlPath := *path + if urlPath == "/" || len(urlPath) == 0 { + return allErrors + } + if urlPath == "//" { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "segment[0] may not be empty")) + return allErrors + } + + if !strings.HasPrefix(urlPath, "/") { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "must start with a '/'")) + } + + urlPathToCheck := urlPath[1:] + if strings.HasSuffix(urlPathToCheck, "/") { + urlPathToCheck = urlPathToCheck[:len(urlPathToCheck)-1] + } + steps := strings.Split(urlPathToCheck, "/") + for i, step := range steps { + if len(step) == 0 { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d] may not be empty", i))) + continue + } + failures := validation.IsDNS1123Subdomain(step) + for _, failure := range failures { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d]: %v", i, failure))) + } + } + + return allErrors +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go b/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go index 3b03fd3fd5eff..eb6c17bdb6b36 100755 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go @@ -37,7 +37,7 @@ const defaultRequestTimeout = 30 * time.Second type GenericWebhook struct { RestClient *rest.RESTClient - initialBackoff time.Duration + InitialBackoff time.Duration } // NewGenericWebhook creates a new GenericWebhook from the provided kubeconfig file. @@ -83,7 +83,7 @@ func newGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFact // it returns an error for which apierrors.SuggestsClientDelay() or apierrors.IsInternalError() returns true. 
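// NOTE (editor sketch): a usage example for the validation helpers added in
// validation.go above; the signatures come from this patch, while the field
// paths, URL, and service values are hypothetical.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/util/webhook"
)

func main() {
	// URL-based client config: https is enforced, and user info, query
	// parameters, and fragments are rejected.
	urlErrs := webhook.ValidateWebhookURL(
		field.NewPath("webhooks").Index(0).Child("clientConfig", "url"),
		"https://hooks.example.com/validate",
		true, // forceHttps
	)

	// Service-based client config: name and namespace are required, and each
	// path segment must be a valid DNS-1123 subdomain.
	path := "/audit/sink"
	svcErrs := webhook.ValidateWebhookService(
		field.NewPath("webhooks").Index(0).Child("clientConfig", "service"),
		"kube-system", "audit-webhook", &path,
	)

	fmt.Println(append(urlErrs, svcErrs...))
}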
func (g *GenericWebhook) WithExponentialBackoff(webhookFn func() rest.Result) rest.Result { var result rest.Result - WithExponentialBackoff(g.initialBackoff, func() error { + WithExponentialBackoff(g.InitialBackoff, func() error { result = webhookFn() return result.Error() }) diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD.bazel index 6791bb420fe3b..4aaea2b81b95c 100644 --- a/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD.bazel +++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD.bazel @@ -11,8 +11,8 @@ go_library( importpath = "k8s.io/apiserver/pkg/util/wsstream", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/websocket:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go b/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go index 6f26b227579b0..2d1a79021363d 100644 --- a/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go +++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go @@ -25,8 +25,8 @@ import ( "strings" "time" - "github.com/golang/glog" "golang.org/x/net/websocket" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/runtime" ) @@ -137,7 +137,7 @@ type ChannelProtocolConfig struct { // channels. func NewDefaultChannelProtocols(channels []ChannelType) map[string]ChannelProtocolConfig { return map[string]ChannelProtocolConfig{ - "": {Binary: true, Channels: channels}, + "": {Binary: true, Channels: channels}, ChannelWebSocketProtocol: {Binary: true, Channels: channels}, Base64ChannelWebSocketProtocol: {Binary: false, Channels: channels}, } @@ -251,7 +251,7 @@ func (conn *Conn) handle(ws *websocket.Conn) { var data []byte if err := websocket.Message.Receive(ws, &data); err != nil { if err != io.EOF { - glog.Errorf("Error on socket receive: %v", err) + klog.Errorf("Error on socket receive: %v", err) } break } @@ -264,11 +264,11 @@ func (conn *Conn) handle(ws *websocket.Conn) { } data = data[1:] if int(channel) >= len(conn.channels) { - glog.V(6).Infof("Frame is targeted for a reader %d that is not valid, possible protocol error", channel) + klog.V(6).Infof("Frame is targeted for a reader %d that is not valid, possible protocol error", channel) continue } if _, err := conn.channels[channel].DataFromSocket(data); err != nil { - glog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) + klog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) continue } } diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go b/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go index 9dd165bfabcf1..4253c17cf578e 100644 --- a/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go +++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go @@ -48,7 +48,7 @@ type ReaderProtocolConfig struct { // subprotocols "", "channel.k8s.io", "base64.channel.k8s.io". 
func NewDefaultReaderProtocols() map[string]ReaderProtocolConfig { return map[string]ReaderProtocolConfig{ - "": {Binary: true}, + "": {Binary: true}, binaryWebSocketProtocol: {Binary: true}, base64BinaryWebSocketProtocol: {Binary: false}, } diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go index 66165915fcca9..a96d9bea30f8f 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go @@ -251,7 +251,7 @@ func (b *bufferedBackend) processEvents(events []*auditinternal.Event) { } } -func (b *bufferedBackend) ProcessEvents(ev ...*auditinternal.Event) { +func (b *bufferedBackend) ProcessEvents(ev ...*auditinternal.Event) bool { // The following mechanism is in place to support the situation when audit // events are still coming after the backend was stopped. var sendErr error @@ -279,9 +279,10 @@ func (b *bufferedBackend) ProcessEvents(ev ...*auditinternal.Event) { case b.buffer <- event: default: sendErr = fmt.Errorf("audit buffer queue blocked") - return + return true } } + return true } func (b *bufferedBackend) String() string { diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/BUILD.bazel b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/BUILD.bazel new file mode 100644 index 0000000000000..07b6c3a0f24a5 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/BUILD.bazel @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "defaults.go", + "dynamic.go", + "factory.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic", + importpath = "k8s.io/apiserver/plugin/pkg/audit/dynamic", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/audit/install:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/audit/v1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", + "//vendor/k8s.io/apiserver/pkg/audit/policy:go_default_library", + "//vendor/k8s.io/apiserver/pkg/audit/util:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/webhook:go_default_library", + "//vendor/k8s.io/apiserver/plugin/pkg/audit/buffered:go_default_library", + "//vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced:go_default_library", + "//vendor/k8s.io/apiserver/plugin/pkg/audit/webhook:go_default_library", + "//vendor/k8s.io/client-go/informers/auditregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/defaults.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/defaults.go new file mode 100644 index 0000000000000..f442954b50082 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/defaults.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamic + +import ( + "time" + + bufferedplugin "k8s.io/apiserver/plugin/pkg/audit/buffered" +) + +const ( + // Default configuration values for ModeBatch when applied to a dynamic plugin + defaultBatchBufferSize = 5000 // Buffer up to 5000 events before starting discarding. + defaultBatchMaxSize = 400 // Only send up to 400 events at a time. + defaultBatchMaxWait = 30 * time.Second // Send events at least twice a minute. + defaultBatchThrottleQPS = 10 // Limit the send rate by 10 QPS. + defaultBatchThrottleBurst = 15 // Allow up to 15 QPS burst. +) + +// NewDefaultWebhookBatchConfig returns new Batch Config objects populated by default values +// for dynamic webhooks +func NewDefaultWebhookBatchConfig() *bufferedplugin.BatchConfig { + return &bufferedplugin.BatchConfig{ + BufferSize: defaultBatchBufferSize, + MaxBatchSize: defaultBatchMaxSize, + MaxBatchWait: defaultBatchMaxWait, + ThrottleEnable: true, + ThrottleQPS: defaultBatchThrottleQPS, + ThrottleBurst: defaultBatchThrottleBurst, + AsyncDelegate: true, + } +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/dynamic.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/dynamic.go new file mode 100644 index 0000000000000..393a205e1646b --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/dynamic.go @@ -0,0 +1,338 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamic + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" + + "k8s.io/klog" + + auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + auditinstall "k8s.io/apiserver/pkg/apis/audit/install" + auditv1 "k8s.io/apiserver/pkg/apis/audit/v1" + "k8s.io/apiserver/pkg/audit" + webhook "k8s.io/apiserver/pkg/util/webhook" + bufferedplugin "k8s.io/apiserver/plugin/pkg/audit/buffered" + auditinformer "k8s.io/client-go/informers/auditregistration/v1alpha1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" +) + +// PluginName is the name reported in error metrics. 
+const PluginName = "dynamic" + +// Config holds the configuration for the dynamic backend +type Config struct { + // Informer for the audit sinks + Informer auditinformer.AuditSinkInformer + // EventConfig holds the configuration for event notifications about the AuditSink API objects + EventConfig EventConfig + // BufferedConfig is the runtime buffered configuration + BufferedConfig *bufferedplugin.BatchConfig + // WebhookConfig holds the configuration for outgoing webhooks + WebhookConfig WebhookConfig +} + +// WebhookConfig holds the configurations for outgoing webhooks +type WebhookConfig struct { + // AuthInfoResolverWrapper provides the webhook authentication for in-cluster endpoints + AuthInfoResolverWrapper webhook.AuthenticationInfoResolverWrapper + // ServiceResolver knows how to convert a webhook service reference into an actual location. + ServiceResolver webhook.ServiceResolver +} + +// EventConfig holds the configurations for sending event notifiations about AuditSink API objects +type EventConfig struct { + // Sink for emitting events + Sink record.EventSink + // Source holds the source information about the event emitter + Source corev1.EventSource +} + +// delegate represents a delegate backend that was created from an audit sink configuration +type delegate struct { + audit.Backend + configuration *auditregv1alpha1.AuditSink + stopChan chan struct{} +} + +// gracefulShutdown will gracefully shutdown the delegate +func (d *delegate) gracefulShutdown() { + close(d.stopChan) + d.Shutdown() +} + +// NewBackend returns a backend that dynamically updates its configuration +// based on a shared informer. +func NewBackend(c *Config) (audit.Backend, error) { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(c.EventConfig.Sink) + + scheme := runtime.NewScheme() + err := auditregv1alpha1.AddToScheme(scheme) + if err != nil { + return nil, err + } + recorder := eventBroadcaster.NewRecorder(scheme, c.EventConfig.Source) + + if c.BufferedConfig == nil { + c.BufferedConfig = NewDefaultWebhookBatchConfig() + } + cm, err := webhook.NewClientManager(auditv1.SchemeGroupVersion, func(s *runtime.Scheme) error { + auditinstall.Install(s) + return nil + }) + if err != nil { + return nil, err + } + + // TODO: need a way of injecting authentication before beta + authInfoResolver, err := webhook.NewDefaultAuthenticationInfoResolver("") + if err != nil { + return nil, err + } + cm.SetAuthenticationInfoResolver(authInfoResolver) + cm.SetServiceResolver(c.WebhookConfig.ServiceResolver) + cm.SetAuthenticationInfoResolverWrapper(c.WebhookConfig.AuthInfoResolverWrapper) + + manager := &backend{ + config: c, + delegates: atomic.Value{}, + delegateUpdateMutex: sync.Mutex{}, + webhookClientManager: cm, + recorder: recorder, + } + manager.delegates.Store(syncedDelegates{}) + + c.Informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + manager.addSink(obj.(*auditregv1alpha1.AuditSink)) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + manager.updateSink(oldObj.(*auditregv1alpha1.AuditSink), newObj.(*auditregv1alpha1.AuditSink)) + }, + DeleteFunc: func(obj interface{}) { + sink, ok := obj.(*auditregv1alpha1.AuditSink) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + klog.V(2).Infof("Couldn't get object from tombstone %#v", obj) + return + } + sink, ok = tombstone.Obj.(*auditregv1alpha1.AuditSink) + if !ok { + klog.V(2).Infof("Tombstone contained 
object that is not an AuditSink: %#v", obj) + return + } + } + manager.deleteSink(sink) + }, + }) + + return manager, nil +} + +type backend struct { + // delegateUpdateMutex holds an update lock on the delegates + delegateUpdateMutex sync.Mutex + config *Config + delegates atomic.Value + webhookClientManager webhook.ClientManager + recorder record.EventRecorder +} + +type syncedDelegates map[types.UID]*delegate + +// Names returns the names of the delegate configurations +func (s syncedDelegates) Names() []string { + names := []string{} + for _, delegate := range s { + names = append(names, delegate.configuration.Name) + } + return names +} + +// ProcessEvents proccesses the given events per current delegate map +func (b *backend) ProcessEvents(events ...*auditinternal.Event) bool { + for _, d := range b.GetDelegates() { + d.ProcessEvents(events...) + } + // Returning true regardless of results, since dynamic audit backends + // can never cause apiserver request to fail. + return true +} + +// Run starts a goroutine that propagates the shutdown signal, +// individual delegates are ran as they are created. +func (b *backend) Run(stopCh <-chan struct{}) error { + go func() { + <-stopCh + b.stopAllDelegates() + }() + return nil +} + +// stopAllDelegates closes the stopChan for every delegate to enable +// goroutines to terminate gracefully. This is a helper method to propagate +// the primary stopChan to the current delegate map. +func (b *backend) stopAllDelegates() { + b.delegateUpdateMutex.Lock() + for _, d := range b.GetDelegates() { + close(d.stopChan) + } +} + +// Shutdown calls the shutdown method on all delegates. The stopChan should +// be closed before this is called. +func (b *backend) Shutdown() { + for _, d := range b.GetDelegates() { + d.Shutdown() + } +} + +// GetDelegates retrieves current delegates in a safe manner +func (b *backend) GetDelegates() syncedDelegates { + return b.delegates.Load().(syncedDelegates) +} + +// copyDelegates returns a copied delegate map +func (b *backend) copyDelegates() syncedDelegates { + c := make(syncedDelegates) + for u, s := range b.GetDelegates() { + c[u] = s + } + return c +} + +// setDelegates sets the current delegates in a safe manner +func (b *backend) setDelegates(delegates syncedDelegates) { + b.delegates.Store(delegates) +} + +// addSink is called by the shared informer when a sink is added +func (b *backend) addSink(sink *auditregv1alpha1.AuditSink) { + b.delegateUpdateMutex.Lock() + defer b.delegateUpdateMutex.Unlock() + delegates := b.copyDelegates() + if _, ok := delegates[sink.UID]; ok { + klog.Errorf("Audit sink %q uid: %s already exists, could not readd", sink.Name, sink.UID) + return + } + d, err := b.createAndStartDelegate(sink) + if err != nil { + msg := fmt.Sprintf("Could not add audit sink %q: %v", sink.Name, err) + klog.Error(msg) + b.recorder.Event(sink, corev1.EventTypeWarning, "CreateFailed", msg) + return + } + delegates[sink.UID] = d + b.setDelegates(delegates) + klog.V(2).Infof("Added audit sink: %s", sink.Name) + klog.V(2).Infof("Current audit sinks: %v", delegates.Names()) +} + +// updateSink is called by the shared informer when a sink is updated. +// The new sink is only rebuilt on spec changes. The new sink must not have +// the same uid as the previous. 
The new sink will be started before the old +// one is shutdown so no events will be lost +func (b *backend) updateSink(oldSink, newSink *auditregv1alpha1.AuditSink) { + b.delegateUpdateMutex.Lock() + defer b.delegateUpdateMutex.Unlock() + delegates := b.copyDelegates() + oldDelegate, ok := delegates[oldSink.UID] + if !ok { + klog.Errorf("Could not update audit sink %q uid: %s, old sink does not exist", + oldSink.Name, oldSink.UID) + return + } + + // check if spec has changed + eq := reflect.DeepEqual(oldSink.Spec, newSink.Spec) + if eq { + delete(delegates, oldSink.UID) + delegates[newSink.UID] = oldDelegate + b.setDelegates(delegates) + } else { + d, err := b.createAndStartDelegate(newSink) + if err != nil { + msg := fmt.Sprintf("Could not update audit sink %q: %v", oldSink.Name, err) + klog.Error(msg) + b.recorder.Event(newSink, corev1.EventTypeWarning, "UpdateFailed", msg) + return + } + delete(delegates, oldSink.UID) + delegates[newSink.UID] = d + b.setDelegates(delegates) + oldDelegate.gracefulShutdown() + } + + klog.V(2).Infof("Updated audit sink: %s", newSink.Name) + klog.V(2).Infof("Current audit sinks: %v", delegates.Names()) +} + +// deleteSink is called by the shared informer when a sink is deleted +func (b *backend) deleteSink(sink *auditregv1alpha1.AuditSink) { + b.delegateUpdateMutex.Lock() + defer b.delegateUpdateMutex.Unlock() + delegates := b.copyDelegates() + delegate, ok := delegates[sink.UID] + if !ok { + klog.Errorf("Could not delete audit sink %q uid: %s, does not exist", sink.Name, sink.UID) + return + } + delete(delegates, sink.UID) + b.setDelegates(delegates) + delegate.gracefulShutdown() + klog.V(2).Infof("Deleted audit sink: %s", sink.Name) + klog.V(2).Infof("Current audit sinks: %v", delegates.Names()) +} + +// createAndStartDelegate will build a delegate from an audit sink configuration and run it +func (b *backend) createAndStartDelegate(sink *auditregv1alpha1.AuditSink) (*delegate, error) { + f := factory{ + config: b.config, + webhookClientManager: b.webhookClientManager, + sink: sink, + } + delegate, err := f.BuildDelegate() + if err != nil { + return nil, err + } + err = delegate.Run(delegate.stopChan) + if err != nil { + return nil, err + } + return delegate, nil +} + +// String returns a string representation of the backend +func (b *backend) String() string { + var delegateStrings []string + for _, delegate := range b.GetDelegates() { + delegateStrings = append(delegateStrings, fmt.Sprintf("%s", delegate)) + } + return fmt.Sprintf("%s[%s]", PluginName, strings.Join(delegateStrings, ",")) +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced/BUILD.bazel b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced/BUILD.bazel new file mode 100644 index 0000000000000..6b6b7adcc2843 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["enforced.go"], + importmap = "k8s.io/kops/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced", + importpath = "k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", + "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", + "//vendor/k8s.io/apiserver/pkg/audit/event:go_default_library", + "//vendor/k8s.io/apiserver/pkg/audit/policy:go_default_library", + ], +) diff --git 
a/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced/enforced.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced/enforced.go new file mode 100644 index 0000000000000..8feb523bedf6d --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced/enforced.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enforced + +import ( + "fmt" + + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + ev "k8s.io/apiserver/pkg/audit/event" + "k8s.io/apiserver/pkg/audit/policy" +) + +// PluginName is the name reported in error metrics. +const PluginName = "enforced" + +// Backend filters audit events according to the policy +// trimming them as necessary to match the level +type Backend struct { + policyChecker policy.Checker + delegateBackend audit.Backend +} + +// NewBackend returns an enforced audit backend that wraps delegate backend. +// Enforced backend automatically runs and shuts down the delegate backend. +func NewBackend(delegate audit.Backend, p policy.Checker) audit.Backend { + return &Backend{ + policyChecker: p, + delegateBackend: delegate, + } +} + +// Run the delegate backend +func (b Backend) Run(stopCh <-chan struct{}) error { + return b.delegateBackend.Run(stopCh) +} + +// Shutdown the delegate backend +func (b Backend) Shutdown() { + b.delegateBackend.Shutdown() +} + +// ProcessEvents enforces policy on a shallow copy of the given event +// dropping any sections that don't conform +func (b Backend) ProcessEvents(events ...*auditinternal.Event) bool { + for _, event := range events { + if event == nil { + continue + } + attr, err := ev.NewAttributes(event) + if err != nil { + audit.HandlePluginError(PluginName, err, event) + continue + } + level, stages := b.policyChecker.LevelAndStages(attr) + if level == auditinternal.LevelNone { + continue + } + // make shallow copy before modifying to satisfy interface definition + ev := *event + e, err := policy.EnforcePolicy(&ev, level, stages) + if err != nil { + audit.HandlePluginError(PluginName, err, event) + continue + } + if e == nil { + continue + } + b.delegateBackend.ProcessEvents(e) + } + // Returning true regardless of results, since dynamic audit backends + // can never cause apiserver request to fail. + return true +} + +// String returns a string representation of the backend +func (b Backend) String() string { + return fmt.Sprintf("%s<%s>", PluginName, b.delegateBackend) +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/factory.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/factory.go new file mode 100644 index 0000000000000..f9ce7abf7906c --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/dynamic/factory.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamic + +import ( + "fmt" + "time" + + auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/audit/policy" + auditutil "k8s.io/apiserver/pkg/audit/util" + "k8s.io/apiserver/pkg/util/webhook" + bufferedplugin "k8s.io/apiserver/plugin/pkg/audit/buffered" + enforcedplugin "k8s.io/apiserver/plugin/pkg/audit/dynamic/enforced" + webhookplugin "k8s.io/apiserver/plugin/pkg/audit/webhook" +) + +// TODO: find a common place for all the default retry backoffs +const retryBackoff = 500 * time.Millisecond + +// factory builds a delegate from an AuditSink +type factory struct { + config *Config + webhookClientManager webhook.ClientManager + sink *auditregv1alpha1.AuditSink +} + +// BuildDelegate creates a delegate from the AuditSink object +func (f *factory) BuildDelegate() (*delegate, error) { + backend, err := f.buildWebhookBackend() + if err != nil { + return nil, err + } + backend = f.applyEnforcedOpts(backend) + backend = f.applyBufferedOpts(backend) + ch := make(chan struct{}) + return &delegate{ + Backend: backend, + configuration: f.sink, + stopChan: ch, + }, nil +} + +func (f *factory) buildWebhookBackend() (audit.Backend, error) { + hookClient := auditutil.HookClientConfigForSink(f.sink) + client, err := f.webhookClientManager.HookClient(hookClient) + if err != nil { + return nil, fmt.Errorf("could not create webhook client: %v", err) + } + backend := webhookplugin.NewDynamicBackend(client, retryBackoff) + return backend, nil +} + +func (f *factory) applyEnforcedOpts(delegate audit.Backend) audit.Backend { + pol := policy.ConvertDynamicPolicyToInternal(&f.sink.Spec.Policy) + checker := policy.NewChecker(pol) + eb := enforcedplugin.NewBackend(delegate, checker) + return eb +} + +func (f *factory) applyBufferedOpts(delegate audit.Backend) audit.Backend { + bc := f.config.BufferedConfig + tc := f.sink.Spec.Webhook.Throttle + if tc != nil { + bc.ThrottleEnable = true + if tc.Burst != nil { + bc.ThrottleBurst = int(*tc.Burst) + } + if tc.QPS != nil { + bc.ThrottleQPS = float32(*tc.QPS) + } + } else { + bc.ThrottleEnable = false + } + return bufferedplugin.NewBackend(delegate, *bc) +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go index 293cdd3c9f7d3..e1c948f62ae0d 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go @@ -59,13 +59,15 @@ func NewBackend(out io.Writer, format string, groupVersion schema.GroupVersion) } } -func (b *backend) ProcessEvents(events ...*auditinternal.Event) { +func (b *backend) ProcessEvents(events ...*auditinternal.Event) bool { + success := true for _, ev := range events { - b.logEvent(ev) + success = b.logEvent(ev) && success } + return success } -func (b *backend) logEvent(ev *auditinternal.Event) { +func (b *backend) logEvent(ev *auditinternal.Event) bool { line := "" switch b.format { case FormatLegacy: @@ -74,17 +76,19 @@ func (b *backend) logEvent(ev *auditinternal.Event) { bs, err := runtime.Encode(audit.Codecs.LegacyCodec(b.groupVersion), ev) 
if err != nil { audit.HandlePluginError(PluginName, err, ev) - return + return false } line = string(bs[:]) default: audit.HandlePluginError(PluginName, fmt.Errorf("log format %q is not in list of known formats (%s)", b.format, strings.Join(AllowedFormats, ",")), ev) - return + return false } if _, err := fmt.Fprint(b.out, line); err != nil { audit.HandlePluginError(PluginName, err, ev) + return false } + return true } func (b *backend) Run(stopCh <-chan struct{}) error { diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go index e06f1f2f05729..de1c2d9f74bec 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go @@ -71,11 +71,12 @@ func NewBackend(delegateBackend audit.Backend, config Config, groupVersion schem } } -func (b *backend) ProcessEvents(events ...*auditinternal.Event) { +func (b *backend) ProcessEvents(events ...*auditinternal.Event) bool { var errors []error var impacted []*auditinternal.Event var batch []*auditinternal.Event var batchSize int64 + success := true for _, event := range events { size, err := b.calcSize(event) // If event was correctly serialized, but the size is more than allowed @@ -97,7 +98,7 @@ func (b *backend) ProcessEvents(events ...*auditinternal.Event) { } if len(batch) > 0 && batchSize+size > b.c.MaxBatchSize { - b.delegateBackend.ProcessEvents(batch...) + success = b.delegateBackend.ProcessEvents(batch...) && success batch = []*auditinternal.Event{} batchSize = 0 } @@ -107,12 +108,13 @@ func (b *backend) ProcessEvents(events ...*auditinternal.Event) { } if len(batch) > 0 { - b.delegateBackend.ProcessEvents(batch...) + success = b.delegateBackend.ProcessEvents(batch...) && success } if len(impacted) > 0 { audit.HandlePluginError(PluginName, utilerrors.NewAggregate(errors), impacted...) } + return success } // truncate removed request and response objects from the audit events, diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go index 80b4842fd2973..9b44e8e85d79e 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go @@ -18,6 +18,7 @@ limitations under the License. package webhook import ( + "fmt" "time" "k8s.io/apimachinery/pkg/runtime/schema" @@ -47,7 +48,20 @@ func loadWebhook(configFile string, groupVersion schema.GroupVersion, initialBac } type backend struct { - w *webhook.GenericWebhook + w *webhook.GenericWebhook + name string +} + +// NewDynamicBackend returns an audit backend configured from a REST client that +// sends events over HTTP to an external service. +func NewDynamicBackend(rc *rest.RESTClient, initialBackoff time.Duration) audit.Backend { + return &backend{ + w: &webhook.GenericWebhook{ + RestClient: rc, + InitialBackoff: initialBackoff, + }, + name: fmt.Sprintf("dynamic_%s", PluginName), + } } // NewBackend returns an audit backend that sends events over HTTP to an external service. 
@@ -56,7 +70,7 @@ func NewBackend(kubeConfigFile string, groupVersion schema.GroupVersion, initial if err != nil { return nil, err } - return &backend{w}, nil + return &backend{w: w, name: PluginName}, nil } func (b *backend) Run(stopCh <-chan struct{}) error { @@ -67,10 +81,12 @@ func (b *backend) Shutdown() { // nothing to do here } -func (b *backend) ProcessEvents(ev ...*auditinternal.Event) { +func (b *backend) ProcessEvents(ev ...*auditinternal.Event) bool { if err := b.processEvents(ev...); err != nil { - audit.HandlePluginError(PluginName, err, ev...) + audit.HandlePluginError(b.String(), err, ev...) + return false } + return true } func (b *backend) processEvents(ev ...*auditinternal.Event) error { @@ -84,5 +100,5 @@ func (b *backend) processEvents(ev ...*auditinternal.Event) error { } func (b *backend) String() string { - return PluginName + return b.name } diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD.bazel b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD.bazel index dce7dae13993e..c765388468f8f 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD.bazel +++ b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD.bazel @@ -7,15 +7,14 @@ go_library( importpath = "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go index feb55f91d35f5..cf0a83b5d979c 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go @@ -18,19 +18,18 @@ limitations under the License. 
package webhook import ( + "context" "time" - "github.com/golang/glog" - authentication "k8s.io/api/authentication/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/cache" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/kubernetes/scheme" authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" + "k8s.io/klog" ) var ( @@ -44,54 +43,82 @@ var _ authenticator.Token = (*WebhookTokenAuthenticator)(nil) type WebhookTokenAuthenticator struct { tokenReview authenticationclient.TokenReviewInterface - responseCache *cache.LRUExpireCache - ttl time.Duration initialBackoff time.Duration + implicitAuds authenticator.Audiences } -// NewFromInterface creates a webhook authenticator using the given tokenReview client -func NewFromInterface(tokenReview authenticationclient.TokenReviewInterface, ttl time.Duration) (*WebhookTokenAuthenticator, error) { - return newWithBackoff(tokenReview, ttl, retryBackoff) +// NewFromInterface creates a webhook authenticator using the given tokenReview +// client. It is recommend to wrap this authenticator with the token cache +// authenticator implemented in +// k8s.io/apiserver/pkg/authentication/token/cache. +func NewFromInterface(tokenReview authenticationclient.TokenReviewInterface, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { + return newWithBackoff(tokenReview, retryBackoff, implicitAuds) } -// New creates a new WebhookTokenAuthenticator from the provided kubeconfig file. -func New(kubeConfigFile string, ttl time.Duration) (*WebhookTokenAuthenticator, error) { +// New creates a new WebhookTokenAuthenticator from the provided kubeconfig +// file. It is recommend to wrap this authenticator with the token cache +// authenticator implemented in +// k8s.io/apiserver/pkg/authentication/token/cache. +func New(kubeConfigFile string, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { tokenReview, err := tokenReviewInterfaceFromKubeconfig(kubeConfigFile) if err != nil { return nil, err } - return newWithBackoff(tokenReview, ttl, retryBackoff) + return newWithBackoff(tokenReview, retryBackoff, implicitAuds) } // newWithBackoff allows tests to skip the sleep. -func newWithBackoff(tokenReview authenticationclient.TokenReviewInterface, ttl, initialBackoff time.Duration) (*WebhookTokenAuthenticator, error) { - return &WebhookTokenAuthenticator{tokenReview, cache.NewLRUExpireCache(1024), ttl, initialBackoff}, nil +func newWithBackoff(tokenReview authenticationclient.TokenReviewInterface, initialBackoff time.Duration, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { + return &WebhookTokenAuthenticator{tokenReview, initialBackoff, implicitAuds}, nil } // AuthenticateToken implements the authenticator.Token interface. -func (w *WebhookTokenAuthenticator) AuthenticateToken(token string) (user.Info, bool, error) { +func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + // We take implicit audiences of the API server at WebhookTokenAuthenticator + // construction time. The outline of how we validate audience here is: + // + // * if the ctx is not audience limited, don't do any audience validation. 
+ // * if ctx is audience-limited, add the audiences to the tokenreview spec + // * if the tokenreview returns with audiences in the status that intersect + // with the audiences in the ctx, copy into the response and return success + // * if the tokenreview returns without an audience in the status, ensure + // the ctx audiences intersect with the implicit audiences, and set the + // intersection in the response. + // * otherwise return unauthenticated. + wantAuds, checkAuds := authenticator.AudiencesFrom(ctx) r := &authentication.TokenReview{ - Spec: authentication.TokenReviewSpec{Token: token}, + Spec: authentication.TokenReviewSpec{ + Token: token, + Audiences: wantAuds, + }, + } + var ( + result *authentication.TokenReview + err error + auds authenticator.Audiences + ) + webhook.WithExponentialBackoff(w.initialBackoff, func() error { + result, err = w.tokenReview.Create(r) + return err + }) + if err != nil { + // An error here indicates bad configuration or an outage. Log for debugging. + klog.Errorf("Failed to make webhook authenticator request: %v", err) + return nil, false, err } - if entry, ok := w.responseCache.Get(r.Spec); ok { - r.Status = entry.(authentication.TokenReviewStatus) - } else { - var ( - result *authentication.TokenReview - err error - ) - webhook.WithExponentialBackoff(w.initialBackoff, func() error { - result, err = w.tokenReview.Create(r) - return err - }) - if err != nil { - // An error here indicates bad configuration or an outage. Log for debugging. - glog.Errorf("Failed to make webhook authenticator request: %v", err) - return nil, false, err + + if checkAuds { + gotAuds := w.implicitAuds + if len(result.Status.Audiences) > 0 { + gotAuds = result.Status.Audiences + } + auds = wantAuds.Intersect(gotAuds) + if len(auds) == 0 { + return nil, false, nil } - r.Status = result.Status - w.responseCache.Add(r.Spec, result.Status, w.ttl) } + + r.Status = result.Status if !r.Status.Authenticated { return nil, false, nil } @@ -104,11 +131,14 @@ func (w *WebhookTokenAuthenticator) AuthenticateToken(token string) (user.Info, } } - return &user.DefaultInfo{ - Name: r.Status.User.Username, - UID: r.Status.User.UID, - Groups: r.Status.User.Groups, - Extra: extra, + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: r.Status.User.Username, + UID: r.Status.User.UID, + Groups: r.Status.User.Groups, + Extra: extra, + }, + Audiences: auds, }, true, nil } diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD.bazel b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD.bazel index 27f6dfc1f8f50..1847d238657d9 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD.bazel +++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD.bazel @@ -7,7 +7,6 @@ go_library( importpath = "k8s.io/apiserver/plugin/pkg/authorizer/webhook", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/authorization/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -17,5 +16,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go 
b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index 03b7bda32fe5c..e05ef503f22a0 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -22,7 +22,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" authorization "k8s.io/api/authorization/v1beta1" "k8s.io/apimachinery/pkg/runtime" @@ -189,7 +189,7 @@ func (w *WebhookAuthorizer) Authorize(attr authorizer.Attributes) (decision auth }) if err != nil { // An error here indicates bad configuration or an outage. Log for debugging. - glog.Errorf("Failed to make webhook authorizer request: %v", err) + klog.Errorf("Failed to make webhook authorizer request: %v", err) return w.decisionOnError, "", err } r.Status = result.Status diff --git a/vendor/k8s.io/client-go/discovery/BUILD.bazel b/vendor/k8s.io/client-go/discovery/BUILD.bazel index 73f600c2e88fa..0e6ebdbf67f49 100644 --- a/vendor/k8s.io/client-go/discovery/BUILD.bazel +++ b/vendor/k8s.io/client-go/discovery/BUILD.bazel @@ -13,7 +13,6 @@ go_library( importpath = "k8s.io/client-go/discovery", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/gregjones/httpcache:go_default_library", @@ -29,5 +28,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/discovery/cached_discovery.go b/vendor/k8s.io/client-go/discovery/cached_discovery.go index d38a0bbdad31f..df69d6a1930bd 100644 --- a/vendor/k8s.io/client-go/discovery/cached_discovery.go +++ b/vendor/k8s.io/client-go/discovery/cached_discovery.go @@ -25,8 +25,8 @@ import ( "sync" "time" - "github.com/golang/glog" "github.com/googleapis/gnostic/OpenAPIv2" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -67,23 +67,23 @@ func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion stri if err == nil { cachedResources := &metav1.APIResourceList{} if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { - glog.V(10).Infof("returning cached discovery info from %v", filename) + klog.V(10).Infof("returning cached discovery info from %v", filename) return cachedResources, nil } } liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) if err != nil { - glog.V(3).Infof("skipped caching discovery info due to %v", err) + klog.V(3).Infof("skipped caching discovery info due to %v", err) return liveResources, err } if liveResources == nil || len(liveResources.APIResources) == 0 { - glog.V(3).Infof("skipped caching discovery info, no resources found") + klog.V(3).Infof("skipped caching discovery info, no resources found") return liveResources, err } if err := d.writeCachedFile(filename, liveResources); err != nil { - glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) } return liveResources, nil @@ -94,6 +94,8 @@ func (d *CachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, er return ServerResources(d) 
} +// ServerGroups returns the supported groups, with information like supported versions and the +// preferred version. func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { filename := filepath.Join(d.cacheDirectory, "servergroups.json") cachedBytes, err := d.getCachedFile(filename) @@ -101,23 +103,23 @@ func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { if err == nil { cachedGroups := &metav1.APIGroupList{} if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { - glog.V(10).Infof("returning cached discovery info from %v", filename) + klog.V(10).Infof("returning cached discovery info from %v", filename) return cachedGroups, nil } } liveGroups, err := d.delegate.ServerGroups() if err != nil { - glog.V(3).Infof("skipped caching discovery info due to %v", err) + klog.V(3).Infof("skipped caching discovery info due to %v", err) return liveGroups, err } if liveGroups == nil || len(liveGroups.Groups) == 0 { - glog.V(3).Infof("skipped caching discovery info, no groups found") + klog.V(3).Infof("skipped caching discovery info, no groups found") return liveGroups, err } if err := d.writeCachedFile(filename, liveGroups); err != nil { - glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) } return liveGroups, nil @@ -202,26 +204,36 @@ func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Obj return err } +// RESTClient returns a RESTClient that is used to communicate with API server +// by this client implementation. func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { return d.delegate.RESTClient() } +// ServerPreferredResources returns the supported resources with the version preferred by the +// server. func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { return ServerPreferredResources(d) } +// ServerPreferredNamespacedResources returns the supported namespaced resources with the +// version preferred by the server. func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { return ServerPreferredNamespacedResources(d) } +// ServerVersion retrieves and parses the server's version (git version). func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { return d.delegate.ServerVersion() } +// OpenAPISchema retrieves and parses the swagger API schema the server supports. func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { return d.delegate.OpenAPISchema() } +// Fresh is supposed to tell the caller whether or not to retry if the cache +// fails to find something (false = retry, true = no need to retry). func (d *CachedDiscoveryClient) Fresh() bool { d.mutex.Lock() defer d.mutex.Unlock() @@ -229,6 +241,7 @@ func (d *CachedDiscoveryClient) Fresh() bool { return d.fresh } +// Invalidate enforces that no cached data is used in the future that is older than the current time. 
func (d *CachedDiscoveryClient) Invalidate() { d.mutex.Lock() defer d.mutex.Unlock() diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index a96602974f4ed..17b39de053964 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -263,8 +263,8 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, result := []*metav1.APIResourceList{} grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource - grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource - gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping + grAPIResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource + gvAPIResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping for _, apiGroup := range serverGroupList.Groups { for _, version := range apiGroup.Versions { @@ -276,11 +276,11 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, } // create empty list which is filled later in another loop - emptyApiResourceList := metav1.APIResourceList{ + emptyAPIResourceList := metav1.APIResourceList{ GroupVersion: version.GroupVersion, } - gvApiResourceLists[groupVersion] = &emptyApiResourceList - result = append(result, &emptyApiResourceList) + gvAPIResourceLists[groupVersion] = &emptyAPIResourceList + result = append(result, &emptyAPIResourceList) for i := range apiResourceList.APIResources { apiResource := &apiResourceList.APIResources[i] @@ -288,21 +288,21 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, continue } gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} - if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { + if _, ok := grAPIResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { // only override with preferred version continue } grVersions[gv] = version.Version - grApiResources[gv] = apiResource + grAPIResources[gv] = apiResource } } } // group selected APIResources according to GroupVersion into APIResourceLists - for groupResource, apiResource := range grApiResources { + for groupResource, apiResource := range grAPIResources { version := grVersions[groupResource] groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} - apiResourceList := gvApiResourceLists[groupVersion] + apiResourceList := gvAPIResourceLists[groupVersion] apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) } @@ -464,9 +464,9 @@ func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *DiscoveryClient) RESTClient() restclient.Interface { - if c == nil { +func (d *DiscoveryClient) RESTClient() restclient.Interface { + if d == nil { return nil } - return c.restClient + return d.restClient } diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go index 984a0ba1ec69a..9565fa46c5ece 100644 --- a/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -36,6 +36,8 @@ type FakeDiscovery struct { FakedServerVersion *version.Info } +// ServerResourcesForGroupVersion returns the supported resources for a group +// and version. func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { action := testing.ActionImpl{ Verb: "get", @@ -50,6 +52,7 @@ func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*me return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) } +// ServerResources returns the supported resources for all groups and versions. func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { action := testing.ActionImpl{ Verb: "get", @@ -59,14 +62,20 @@ func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { return c.Resources, nil } +// ServerPreferredResources returns the supported resources with the version +// preferred by the server. func (c *FakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) { return nil, nil } +// ServerPreferredNamespacedResources returns the supported namespaced resources +// with the version preferred by the server. func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { return nil, nil } +// ServerGroups returns the supported groups, with information like supported +// versions and the preferred version. func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { action := testing.ActionImpl{ Verb: "get", @@ -108,6 +117,7 @@ func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { } +// ServerVersion retrieves and parses the server's version. func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { action := testing.ActionImpl{} action.Verb = "get" @@ -122,10 +132,13 @@ func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { return &versionInfo, nil } +// OpenAPISchema retrieves and parses the swagger API schema the server supports. func (c *FakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) { return &openapi_v2.Document{}, nil } +// RESTClient returns a RESTClient that is used to communicate with API server +// by this client implementation. func (c *FakeDiscovery) RESTClient() restclient.Interface { return nil } diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go index 353d34b3c5ac5..3bfe514e82341 100644 --- a/vendor/k8s.io/client-go/discovery/helper.go +++ b/vendor/k8s.io/client-go/discovery/helper.go @@ -31,11 +31,11 @@ import ( func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { sVer, err := client.ServerVersion() if err != nil { - return fmt.Errorf("couldn't read version from server: %v\n", err) + return fmt.Errorf("couldn't read version from server: %v", err) } // GitVersion includes GitCommit and GitTreeState, but best to be safe? 
if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { - return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, clientVersion) + return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, clientVersion) } return nil @@ -101,12 +101,15 @@ func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1 return result } +// ResourcePredicate has a method to check if a resource matches a given condition. type ResourcePredicate interface { Match(groupVersion string, r *metav1.APIResource) bool } +// ResourcePredicateFunc returns true if it matches a resource based on a custom condition. type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool +// Match is a wrapper around ResourcePredicateFunc. func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { return fn(groupVersion, r) } @@ -116,6 +119,7 @@ type SupportsAllVerbs struct { Verbs []string } +// Match checks if a resource contains all the given verbs. func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) } diff --git a/vendor/k8s.io/client-go/discovery/round_tripper.go b/vendor/k8s.io/client-go/discovery/round_tripper.go index 75b7f52097711..4e2bc24e774d2 100644 --- a/vendor/k8s.io/client-go/discovery/round_tripper.go +++ b/vendor/k8s.io/client-go/discovery/round_tripper.go @@ -20,10 +20,10 @@ import ( "net/http" "path/filepath" - "github.com/golang/glog" "github.com/gregjones/httpcache" "github.com/gregjones/httpcache/diskcache" "github.com/peterbourgon/diskv" + "k8s.io/klog" ) type cacheRoundTripper struct { @@ -55,7 +55,7 @@ func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { if cr, ok := rt.rt.Transport.(canceler); ok { cr.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) + klog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) } } diff --git a/vendor/k8s.io/client-go/informers/BUILD.bazel b/vendor/k8s.io/client-go/informers/BUILD.bazel index cd1dacf068ba1..8f85fb1ab3b7c 100644 --- a/vendor/k8s.io/client-go/informers/BUILD.bazel +++ b/vendor/k8s.io/client-go/informers/BUILD.bazel @@ -15,6 +15,7 @@ go_library( "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/auditregistration/v1alpha1:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta2:go_default_library", @@ -42,6 +43,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/client-go/informers/admissionregistration:go_default_library", "//vendor/k8s.io/client-go/informers/apps:go_default_library", + "//vendor/k8s.io/client-go/informers/auditregistration:go_default_library", "//vendor/k8s.io/client-go/informers/autoscaling:go_default_library", "//vendor/k8s.io/client-go/informers/batch:go_default_library", "//vendor/k8s.io/client-go/informers/certificates:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/auditregistration/BUILD.bazel b/vendor/k8s.io/client-go/informers/auditregistration/BUILD.bazel new file mode 100644 index 0000000000000..d6fc2321e43e2 --- 
/dev/null +++ b/vendor/k8s.io/client-go/informers/auditregistration/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["interface.go"], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/informers/auditregistration", + importpath = "k8s.io/client-go/informers/auditregistration", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/client-go/informers/auditregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/informers/auditregistration/interface.go b/vendor/k8s.io/client-go/informers/auditregistration/interface.go new file mode 100644 index 0000000000000..0f1682c478d64 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/auditregistration/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package auditregistration + +import ( + v1alpha1 "k8s.io/client-go/informers/auditregistration/v1alpha1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. 
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/BUILD.bazel new file mode 100644 index 0000000000000..7c9118f5ce24b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "auditsink.go", + "interface.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1", + importpath = "k8s.io/client-go/informers/auditregistration/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/auditregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go new file mode 100644 index 0000000000000..69778ad2cfed6 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/auditregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// AuditSinkInformer provides access to a shared informer and lister for +// AuditSinks. +type AuditSinkInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.AuditSinkLister +} + +type auditSinkInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewAuditSinkInformer constructs a new informer for AuditSink type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewAuditSinkInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredAuditSinkInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredAuditSinkInformer constructs a new informer for AuditSink type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredAuditSinkInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuditregistrationV1alpha1().AuditSinks().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuditregistrationV1alpha1().AuditSinks().Watch(options) + }, + }, + &auditregistrationv1alpha1.AuditSink{}, + resyncPeriod, + indexers, + ) +} + +func (f *auditSinkInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredAuditSinkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *auditSinkInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&auditregistrationv1alpha1.AuditSink{}, f.defaultInformer) +} + +func (f *auditSinkInformer) Lister() v1alpha1.AuditSinkLister { + return v1alpha1.NewAuditSinkLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go new file mode 100644 index 0000000000000..0a67ba821df5a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // AuditSinks returns a AuditSinkInformer. + AuditSinks() AuditSinkInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// AuditSinks returns a AuditSinkInformer. 
+func (v *version) AuditSinks() AuditSinkInformer { + return &auditSinkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index 7ae22ee2c82ac..88ead6213a197 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -28,6 +28,7 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" admissionregistration "k8s.io/client-go/informers/admissionregistration" apps "k8s.io/client-go/informers/apps" + auditregistration "k8s.io/client-go/informers/auditregistration" autoscaling "k8s.io/client-go/informers/autoscaling" batch "k8s.io/client-go/informers/batch" certificates "k8s.io/client-go/informers/certificates" @@ -188,6 +189,7 @@ type SharedInformerFactory interface { Admissionregistration() admissionregistration.Interface Apps() apps.Interface + Auditregistration() auditregistration.Interface Autoscaling() autoscaling.Interface Batch() batch.Interface Certificates() certificates.Interface @@ -211,6 +213,10 @@ func (f *sharedInformerFactory) Apps() apps.Interface { return apps.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Auditregistration() auditregistration.Interface { + return auditregistration.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Autoscaling() autoscaling.Interface { return autoscaling.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index 3af96304a5c59..09a5efe2ec89d 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -26,6 +26,7 @@ import ( v1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" v1beta2 "k8s.io/api/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" autoscalingv1 "k8s.io/api/autoscaling/v1" v2beta1 "k8s.io/api/autoscaling/v2beta1" v2beta2 "k8s.io/api/autoscaling/v2beta2" @@ -120,6 +121,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1beta2.SchemeGroupVersion.WithResource("statefulsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().StatefulSets().Informer()}, nil + // Group=auditregistration.k8s.io, Version=v1alpha1 + case auditregistrationv1alpha1.SchemeGroupVersion.WithResource("auditsinks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Auditregistration().V1alpha1().AuditSinks().Informer()}, nil + // Group=autoscaling, Version=v1 case autoscalingv1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1().HorizontalPodAutoscalers().Informer()}, nil @@ -257,6 +262,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=storage.k8s.io, Version=v1 case storagev1.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().StorageClasses().Informer()}, nil + case storagev1.SchemeGroupVersion.WithResource("volumeattachments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().VolumeAttachments().Informer()}, nil // Group=storage.k8s.io, Version=v1alpha1 case storagev1alpha1.SchemeGroupVersion.WithResource("volumeattachments"): diff --git 
a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go index 5e05516b13404..b00ed70cfdb71 100644 --- a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go @@ -27,6 +27,7 @@ import ( cache "k8s.io/client-go/tools/cache" ) +// NewInformerFunc takes kubernetes.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(kubernetes.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -35,4 +36,5 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/client-go/informers/storage/v1/BUILD.bazel b/vendor/k8s.io/client-go/informers/storage/v1/BUILD.bazel index 9f9ec99351660..42f97f6110a07 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/BUILD.bazel +++ b/vendor/k8s.io/client-go/informers/storage/v1/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "interface.go", "storageclass.go", + "volumeattachment.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/informers/storage/v1", importpath = "k8s.io/client-go/informers/storage/v1", diff --git a/vendor/k8s.io/client-go/informers/storage/v1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1/interface.go index d7e4b5c49acef..64fc2bd84369d 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // StorageClasses returns a StorageClassInformer. StorageClasses() StorageClassInformer + // VolumeAttachments returns a VolumeAttachmentInformer. + VolumeAttachments() VolumeAttachmentInformer } type version struct { @@ -43,3 +45,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (v *version) StorageClasses() StorageClassInformer { return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// VolumeAttachments returns a VolumeAttachmentInformer. +func (v *version) VolumeAttachments() VolumeAttachmentInformer { + return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go new file mode 100644 index 0000000000000..7ca3b86f22cfc --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + time "time" + + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/storage/v1" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeAttachmentInformer provides access to a shared informer and lister for +// VolumeAttachments. +type VolumeAttachmentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.VolumeAttachmentLister +} + +type volumeAttachmentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().VolumeAttachments().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().VolumeAttachments().Watch(options) + }, + }, + &storagev1.VolumeAttachment{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagev1.VolumeAttachment{}, f.defaultInformer) +} + +func (f *volumeAttachmentInformer) Lister() v1.VolumeAttachmentLister { + return v1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/kubernetes/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/BUILD.bazel index b233d895b5108..189461c9378a6 100644 --- a/vendor/k8s.io/client-go/kubernetes/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1:go_default_library", 
"//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 122e4bb7f198b..6ad01d6db19a5 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -25,6 +25,7 @@ import ( appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" @@ -66,6 +67,9 @@ type Interface interface { AppsV1() appsv1.AppsV1Interface // Deprecated: please explicitly pick a version if possible. Apps() appsv1.AppsV1Interface + AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface // Deprecated: please explicitly pick a version if possible. Authentication() authenticationv1.AuthenticationV1Interface @@ -133,6 +137,7 @@ type Clientset struct { appsV1beta1 *appsv1beta1.AppsV1beta1Client appsV1beta2 *appsv1beta2.AppsV1beta2Client appsV1 *appsv1.AppsV1Client + auditregistrationV1alpha1 *auditregistrationv1alpha1.AuditregistrationV1alpha1Client authenticationV1 *authenticationv1.AuthenticationV1Client authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client authorizationV1 *authorizationv1.AuthorizationV1Client @@ -198,6 +203,17 @@ func (c *Clientset) Apps() appsv1.AppsV1Interface { return c.appsV1 } +// AuditregistrationV1alpha1 retrieves the AuditregistrationV1alpha1Client +func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { + return c.auditregistrationV1alpha1 +} + +// Deprecated: Auditregistration retrieves the default version of AuditregistrationClient. +// Please explicitly pick a version. 
+func (c *Clientset) Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { + return c.auditregistrationV1alpha1 +} + // AuthenticationV1 retrieves the AuthenticationV1Client func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { return c.authenticationV1 @@ -454,6 +470,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.auditregistrationV1alpha1, err = auditregistrationv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.authenticationV1, err = authenticationv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -575,6 +595,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) cs.appsV1 = appsv1.NewForConfigOrDie(c) + cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.NewForConfigOrDie(c) cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) @@ -614,6 +635,7 @@ func New(c rest.Interface) *Clientset { cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) cs.appsV1 = appsv1.New(c) + cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.New(c) cs.authenticationV1 = authenticationv1.New(c) cs.authenticationV1beta1 = authenticationv1beta1.New(c) cs.authorizationV1 = authorizationv1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/fake/BUILD.bazel index c4c92322c6df3..964b51758ce75 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/fake/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/auditregistration/v1alpha1:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", "//vendor/k8s.io/api/authorization/v1:go_default_library", @@ -61,6 +62,8 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index a23b3165a04a1..47b63ffaed98c 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -34,6 +34,8 @@ import ( fakeappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" fakeappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake" + auditregistrationv1alpha1 
"k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1" + fakeauditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" fakeauthenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1/fake" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" @@ -166,6 +168,16 @@ func (c *Clientset) Apps() appsv1.AppsV1Interface { return &fakeappsv1.FakeAppsV1{Fake: &c.Fake} } +// AuditregistrationV1alpha1 retrieves the AuditregistrationV1alpha1Client +func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { + return &fakeauditregistrationv1alpha1.FakeAuditregistrationV1alpha1{Fake: &c.Fake} +} + +// Auditregistration retrieves the AuditregistrationV1alpha1Client +func (c *Clientset) Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { + return &fakeauditregistrationv1alpha1.FakeAuditregistrationV1alpha1{Fake: &c.Fake} +} + // AuthenticationV1 retrieves the AuthenticationV1Client func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake} diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go index c429979688b72..6e1e1fb29365e 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -24,6 +24,7 @@ import ( appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" authenticationv1 "k8s.io/api/authentication/v1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" authorizationv1 "k8s.io/api/authorization/v1" @@ -66,6 +67,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, appsv1.AddToScheme, + auditregistrationv1alpha1.AddToScheme, authenticationv1.AddToScheme, authenticationv1beta1.AddToScheme, authorizationv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel index 4da5b192a8494..fd1d43a10fd62 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel @@ -15,6 +15,7 @@ go_library( "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/auditregistration/v1alpha1:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", "//vendor/k8s.io/api/authorization/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 9ca89b76e44f8..e336eb9179e0b 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -24,6 +24,7 @@ import ( appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" authenticationv1 "k8s.io/api/authentication/v1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" authorizationv1 "k8s.io/api/authorization/v1" @@ -66,6 +67,7 @@ var 
localSchemeBuilder = runtime.SchemeBuilder{ appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, appsv1.AddToScheme, + auditregistrationv1alpha1.AddToScheme, authenticationv1.AddToScheme, authenticationv1beta1.AddToScheme, authorizationv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go index b927dae2cd34f..4baee66104dfb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go @@ -112,7 +112,7 @@ func (c *FakeInitializerConfigurations) DeleteCollection(options *v1.DeleteOptio // Patch applies the patch and returns the patched initializerConfiguration. func (c *FakeInitializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(initializerconfigurationsResource, name, data, subresources...), &v1alpha1.InitializerConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(initializerconfigurationsResource, name, pt, data, subresources...), &v1alpha1.InitializerConfiguration{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go index e014ea72b69ec..7b8acecee9b22 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (res // List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.InitializerConfigurationList{} err = c.client.Get(). Resource("initializerconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1. // Watch returns a watch.Interface that watches the requested initializerConfigurations. func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("initializerconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOption // DeleteCollection deletes a collection of objects. 
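// --- Editorial usage sketch (not part of the vendored diff) -----------------
// The clientset, fake clientset, and scheme hunks above wire the new
// auditregistration/v1alpha1 group into Clientset. A minimal, hedged sketch of
// reaching the new group client: the kubeconfig path is illustrative only, and
// the AuditSinks() getter is assumed from the generated typed package, which is
// not shown in this hunk.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build client configuration from a kubeconfig file (path is hypothetical).
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// The new group client added above; the deprecated Auditregistration()
	// accessor returns the same v1alpha1 interface.
	auditClient := clientset.AuditregistrationV1alpha1()

	// Assumed generated getter for the group's AuditSink resource.
	sinks, err := auditClient.AuditSinks().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d audit sinks\n", len(sinks.Items))
}
// -----------------------------------------------------------------------------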
func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("initializerconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go index e06888cc13c8a..d2177bad52d27 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -112,7 +112,7 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteO // Patch applies the patch and returns the patched mutatingWebhookConfiguration. func (c *FakeMutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, data, subresources...), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.MutatingWebhookConfiguration{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go index 1069634e2368d..6be2b393866b7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -112,7 +112,7 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(options *v1.Delet // Patch applies the patch and returns the patched validatingWebhookConfiguration. func (c *FakeValidatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index cb0157102914d..4524896cd6b74 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. 
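// --- Editorial usage sketch (not part of the vendored diff) -----------------
// The List/Watch/DeleteCollection hunks above now also set a client-side
// request timeout from ListOptions.TimeoutSeconds via Timeout(...); previously
// only the server-side query parameter was sent. A minimal sketch, assuming an
// already constructed clientset:
package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listDeploymentsBounded lists Deployments with a 30 second request timeout,
// which the regenerated typed clients above now honour via Timeout(timeout).
func listDeploymentsBounded(clientset kubernetes.Interface, namespace string) error {
	timeoutSeconds := int64(30)
	list, err := clientset.AppsV1().Deployments(namespace).List(metav1.ListOptions{
		TimeoutSeconds: &timeoutSeconds, // plumbed into Timeout(30 * time.Second)
	})
	if err != nil {
		return err
	}
	fmt.Printf("listed %d deployments\n", len(list.Items))
	return nil
}
// -----------------------------------------------------------------------------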
package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.MutatingWebhookConfigurationList{} err = c.client.Get(). Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1bet // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("mutatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 3a9339f6cdc8a..7e711b3000e1c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ValidatingWebhookConfigurationList{} err = c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -83,10 +90,15 @@ func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1b // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *validatingWebhookConfigurations) Delete(name string, options *v1.Delete // DeleteCollection deletes a collection of objects. func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("validatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD.bazel index b61f8828a088c..fbe773a3c2ad6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD.bazel @@ -17,6 +17,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index 1ddaa1a71b7d5..e28e4d2a3fa65 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *controllerRevisions) Get(name string, options metav1.GetOptions) (resul // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.Controll // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). 
Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *controllerRevisions) Delete(name string, options *metav1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index 03a87069840ba..a535cdabe649c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *daemonSets) Get(name string, options metav1.GetOptions) (result *v1.Dae // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, er // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *daemonSets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index 73d46f8bb6cb3..f9799a453955c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -19,7 +19,10 @@ limitations under the License. 
package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -44,6 +47,9 @@ type DeploymentInterface interface { List(opts metav1.ListOptions) (*v1.DeploymentList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) + GetScale(deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + DeploymentExpansion } @@ -76,11 +82,16 @@ func (c *deployments) Get(name string, options metav1.GetOptions) (result *v1.De // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +99,16 @@ func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +166,15 @@ func (c *deployments) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -172,3 +193,31 @@ func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } + +// GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *deployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + Body(scale). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/BUILD.bazel index 49a9f0d4894df..cc7672becd2e9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/BUILD.bazel @@ -16,6 +16,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go index fc2808daf88c8..eb38bca41bab2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -119,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.ControllerRevision, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &appsv1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &appsv1.ControllerRevision{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go index 89e72ebd399e2..c06336e9701e6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -131,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.DaemonSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &appsv1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &appsv1.DaemonSet{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go index 2fbd82d6b8e37..6a8cb379da81b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -20,6 +20,7 @@ package fake import ( appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -131,10 +132,32 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched deployment. 
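// --- Editorial usage sketch (not part of the vendored diff) -----------------
// The apps/v1 hunks above add GetScale/UpdateScale to DeploymentInterface
// (and, further below, to ReplicaSetInterface and StatefulSetInterface),
// backed by the /scale subresource and the autoscaling/v1 Scale type.
// A minimal sketch of scaling a Deployment through the new methods:
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDeployment reads the current scale of a Deployment and sets it to the
// requested replica count via the scale subresource.
func scaleDeployment(clientset kubernetes.Interface, namespace, name string, replicas int32) error {
	scale, err := clientset.AppsV1().Deployments(namespace).GetScale(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = clientset.AppsV1().Deployments(namespace).UpdateScale(name, scale)
	return err
}
// -----------------------------------------------------------------------------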
func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.Deployment, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &appsv1.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &appsv1.Deployment{}) if obj == nil { return nil, err } return obj.(*appsv1.Deployment), err } + +// GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. +func (c *FakeDeployments) GetScale(deploymentName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *FakeDeployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go index 7b882c8630c7f..e871f82f76614 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -20,6 +20,7 @@ package fake import ( appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -131,10 +132,32 @@ func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &appsv1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &appsv1.ReplicaSet{}) if obj == nil { return nil, err } return obj.(*appsv1.ReplicaSet), err } + +// GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. +func (c *FakeReplicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *FakeReplicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go index 3cd643a597922..83e80bff4973c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -20,6 +20,7 @@ package fake import ( appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -131,10 +132,32 @@ func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.StatefulSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &appsv1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &appsv1.StatefulSet{}) if obj == nil { return nil, err } return obj.(*appsv1.StatefulSet), err } + +// GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. +func (c *FakeStatefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *FakeStatefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index 077941162d022..ff3504e78a4ea 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -19,7 +19,10 @@ limitations under the License. 
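// --- Editorial test sketch (not part of the vendored diff) ------------------
// The fake hunks above record get/update actions with the "scale" subresource
// for FakeDeployments, FakeReplicaSets and FakeStatefulSets. In unit tests the
// default object tracker is not expected to synthesize Scale objects on its own
// (an assumption, based on the generated type assertion to *autoscalingv1.Scale),
// so a reactor is typically prepended to serve the subresource:
package example

import (
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

// newClientWithScale returns a fake clientset whose deployments/scale GETs are
// answered with a fixed Scale object.
func newClientWithScale(namespace, name string, replicas int32) *fake.Clientset {
	cs := fake.NewSimpleClientset()
	cs.PrependReactor("get", "deployments", func(action k8stesting.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() != "scale" {
			return false, nil, nil // let the default reactors handle plain gets
		}
		return true, &autoscalingv1.Scale{
			ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
			Spec:       autoscalingv1.ScaleSpec{Replicas: replicas},
		}, nil
	})
	return cs
}
// -----------------------------------------------------------------------------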
package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -44,6 +47,9 @@ type ReplicaSetInterface interface { List(opts metav1.ListOptions) (*v1.ReplicaSetList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) + GetScale(replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + ReplicaSetExpansion } @@ -76,11 +82,16 @@ func (c *replicaSets) Get(name string, options metav1.GetOptions) (result *v1.Re // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +99,16 @@ func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +166,15 @@ func (c *replicaSets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -172,3 +193,31 @@ func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } + +// GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + Body(scale). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index 54322d97d31b1..c12c470bbae80 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -19,7 +19,10 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -44,6 +47,9 @@ type StatefulSetInterface interface { List(opts metav1.ListOptions) (*v1.StatefulSetList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) + GetScale(statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + StatefulSetExpansion } @@ -76,11 +82,16 @@ func (c *statefulSets) Get(name string, options metav1.GetOptions) (result *v1.S // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +99,16 @@ func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +166,15 @@ func (c *statefulSets) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -172,3 +193,31 @@ func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subre Into(result) return } + +// GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel index f7a246b3b22b4..86513a685e996 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel @@ -8,7 +8,6 @@ go_library( "deployment.go", "doc.go", "generated_expansion.go", - "scale.go", "statefulset.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index 4d882e26e7d74..2c9db886b1e28 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -29,7 +29,6 @@ type AppsV1beta1Interface interface { RESTClient() rest.Interface ControllerRevisionsGetter DeploymentsGetter - ScalesGetter StatefulSetsGetter } @@ -46,10 +45,6 @@ func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { return newDeployments(c, namespace) } -func (c *AppsV1beta1Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index ec8fa9242f5ab..45ddb91592de7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.Control // Watch returns a watch.Interface that watches the requested controllerRevisions. 
func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 365e06f3f10e8..05fdcb7a644f1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/BUILD.bazel index c771bf9256d92..0afbbfff2bfc4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/BUILD.bazel @@ -7,7 +7,6 @@ go_library( "fake_apps_client.go", "fake_controllerrevision.go", "fake_deployment.go", - "fake_scale.go", "fake_statefulset.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go index 2ff602be9b6fe..8e65d78d29ae6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go @@ -36,10 +36,6 @@ func (c *FakeAppsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterf return &FakeDeployments{c, namespace} } -func (c *FakeAppsV1beta1) Scales(namespace string) v1beta1.ScaleInterface { - return &FakeScales{c, namespace} -} - func (c *FakeAppsV1beta1) StatefulSets(namespace string) v1beta1.StatefulSetInterface { return &FakeStatefulSets{c, namespace} } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go index 9241948911311..8e339d78b0fc1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -119,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &v1beta1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta1.ControllerRevision{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index c4749c52b3bfb..c33baba589e71 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -131,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go deleted file mode 100644 index de71947e5230a..0000000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -// FakeScales implements ScaleInterface -type FakeScales struct { - Fake *FakeAppsV1beta1 - ns string -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index b0f194a7ddd44..754da5fba6576 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -131,7 +131,7 @@ func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &v1beta1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.StatefulSet{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go index b2bfd73a77bd1..113455df24c59 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go @@ -22,6 +22,4 @@ type ControllerRevisionExpansion interface{} type DeploymentExpansion interface{} -type ScaleExpansion interface{} - type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 651745451d8ff..c4b35b424c702 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -19,6 +19,8 @@ limitations under the License. 
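// --- Editorial test sketch (not part of the vendored diff) ------------------
// Across the fake hunks in this diff, NewPatchSubresourceAction and
// NewRootPatchSubresourceAction now receive the patch type (pt), so recorded
// actions carry it. A minimal sketch, assuming the testing package's
// PatchAction interface exposes GetPatchType() at this client-go version:
package example

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

// patchTypeRecorded patches a Deployment on a fake clientset and reports the
// patch type that the fake recorded for the action.
func patchTypeRecorded() (types.PatchType, error) {
	cs := fake.NewSimpleClientset(&appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	})
	_, err := cs.AppsV1().Deployments("default").Patch(
		"web", types.StrategicMergePatchType, []byte(`{"metadata":{"labels":{"patched":"true"}}}`))
	if err != nil {
		return "", err
	}
	for _, a := range cs.Actions() {
		if p, ok := a.(k8stesting.PatchAction); ok {
			fmt.Printf("recorded patch type: %s\n", p.GetPatchType())
			return p.GetPatchType(), nil
		}
	}
	return "", fmt.Errorf("no patch action recorded")
}
// -----------------------------------------------------------------------------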
package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetLis // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD.bazel index 2302b597f919b..8682836ae960e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD.bazel @@ -10,7 +10,6 @@ go_library( "doc.go", "generated_expansion.go", "replicaset.go", - "scale.go", "statefulset.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 27549499fb68d..99d677f405029 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -31,7 +31,6 @@ type AppsV1beta2Interface interface { DaemonSetsGetter DeploymentsGetter ReplicaSetsGetter - ScalesGetter StatefulSetsGetter } @@ -56,10 +55,6 @@ func (c *AppsV1beta2Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } -func (c *AppsV1beta2Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index 1271cc623fef4..e1d6025155ed5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.Control // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. 
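// --- Editorial migration note (not part of the vendored diff) ---------------
// The hunks above and below delete scale.go / fake_scale.go and drop the
// ScalesGetter from AppsV1beta1Interface and AppsV1beta2Interface, so code that
// called e.g. client.AppsV1beta1().Scales(ns) no longer compiles against this
// vendor level. One replacement visible in this diff is the per-resource scale
// subresource on the apps/v1 clients, e.g.:
//
//	scale, err := clientset.AppsV1().StatefulSets(ns).GetScale(name, metav1.GetOptions{})
//
// (see the apps/v1 GetScale/UpdateScale sketch earlier in this section).
// -----------------------------------------------------------------------------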
func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index 683c068121627..f8b7ac2597339 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.Da // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, e // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 9a04513f1ba1b..510250b06e175 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. 
func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/BUILD.bazel index c61e9e47e129f..3d28590ec35a6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/BUILD.bazel @@ -9,7 +9,6 @@ go_library( "fake_daemonset.go", "fake_deployment.go", "fake_replicaset.go", - "fake_scale.go", "fake_statefulset.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go index f7d79d3522588..0ec34a2cdbc3d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go @@ -44,10 +44,6 @@ func (c *FakeAppsV1beta2) ReplicaSets(namespace string) v1beta2.ReplicaSetInterf return &FakeReplicaSets{c, namespace} } -func (c *FakeAppsV1beta2) Scales(namespace string) v1beta2.ScaleInterface { - return &FakeScales{c, namespace} -} - func (c *FakeAppsV1beta2) StatefulSets(namespace string) v1beta2.StatefulSetInterface { return &FakeStatefulSets{c, namespace} } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go index 954ac35df8c7e..197f190cbdad5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -119,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched controllerRevision. 
func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &v1beta2.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta2.ControllerRevision{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go index 38a1475503e6c..b50747fdc9c03 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -131,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &v1beta2.DaemonSet{}) + Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.DaemonSet{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go index cae2322424bc0..b74d24ed7c4d4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -131,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta2.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta2.Deployment{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go index 05fa789318020..ba1de33ecf2c3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -131,7 +131,7 @@ func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &v1beta2.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta2.ReplicaSet{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go deleted file mode 100644 index b06b7e8e303e8..0000000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -// FakeScales implements ScaleInterface -type FakeScales struct { - Fake *FakeAppsV1beta2 - ns string -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go index fe78512862a59..652c7cbc5d67c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -131,7 +131,7 @@ func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &v1beta2.StatefulSet{}) + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.StatefulSet{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go index bceae5986233e..6a21749687d22 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go @@ -26,6 +26,4 @@ type DeploymentExpansion interface{} type ReplicaSetExpansion interface{} -type ScaleExpansion interface{} - type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 9fd9de930ba24..7b738774b79df 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.R // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index 095601e15a581..de7c3db8b5068 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -79,11 +81,16 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2. // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -91,11 +98,16 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetLis // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). 
VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -153,10 +165,15 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/BUILD.bazel similarity index 51% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/BUILD.bazel rename to vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/BUILD.bazel index 11b386716d9a5..e8bad20ffa2aa 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/BUILD.bazel @@ -3,20 +3,21 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "auditregistration_client.go", + "auditsink.go", "doc.go", "generated_expansion.go", - "priorityclass.go", - "scheduling_client.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion", + importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1", + importpath = "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1", visibility = ["//visibility:public"], deps = [ + "//vendor/k8s.io/api/auditregistration/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/scheduling:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go similarity index 51% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/settings_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go index 8c365dc6f9e68..f007b05ef7df1 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go @@ -16,29 +16,31 @@ limitations under the 
License. // Code generated by client-gen. DO NOT EDIT. -package internalversion +package v1alpha1 import ( + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) -type SettingsInterface interface { +type AuditregistrationV1alpha1Interface interface { RESTClient() rest.Interface - PodPresetsGetter + AuditSinksGetter } -// SettingsClient is used to interact with features provided by the settings.k8s.io group. -type SettingsClient struct { +// AuditregistrationV1alpha1Client is used to interact with features provided by the auditregistration.k8s.io group. +type AuditregistrationV1alpha1Client struct { restClient rest.Interface } -func (c *SettingsClient) PodPresets(namespace string) PodPresetInterface { - return newPodPresets(c, namespace) +func (c *AuditregistrationV1alpha1Client) AuditSinks() AuditSinkInterface { + return newAuditSinks(c) } -// NewForConfig creates a new SettingsClient for the given config. -func NewForConfig(c *rest.Config) (*SettingsClient, error) { +// NewForConfig creates a new AuditregistrationV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*AuditregistrationV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -47,12 +49,12 @@ func NewForConfig(c *rest.Config) (*SettingsClient, error) { if err != nil { return nil, err } - return &SettingsClient{client}, nil + return &AuditregistrationV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new SettingsClient for the given config and +// NewForConfigOrDie creates a new AuditregistrationV1alpha1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *SettingsClient { +func NewForConfigOrDie(c *rest.Config) *AuditregistrationV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -60,35 +62,27 @@ func NewForConfigOrDie(c *rest.Config) *SettingsClient { return client } -// New creates a new SettingsClient for the given RESTClient. -func New(c rest.Interface) *SettingsClient { - return &SettingsClient{c} +// New creates a new AuditregistrationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AuditregistrationV1alpha1Client { + return &AuditregistrationV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("settings.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("settings.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } return nil } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
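// Illustrative sketch, not part of the vendored diff: constructing the new
// auditregistration.k8s.io/v1alpha1 group client generated above. Only
// NewForConfig and AuditSinks() come from the code in this change; the
// rest.Config handling and function name are assumptions for illustration.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	auditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1"
	rest "k8s.io/client-go/rest"
)

func listAuditSinks(cfg *rest.Config) error {
	client, err := auditregistrationv1alpha1.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// AuditSink is cluster-scoped, so AuditSinks() takes no namespace and the
	// generated request chain never calls Namespace().
	_, err = client.AuditSinks().List(metav1.ListOptions{})
	return err
}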
-func (c *SettingsClient) RESTClient() rest.Interface { +func (c *AuditregistrationV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go new file mode 100644 index 0000000000000..414d480062eb8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// AuditSinksGetter has a method to return a AuditSinkInterface. +// A group's client should implement this interface. +type AuditSinksGetter interface { + AuditSinks() AuditSinkInterface +} + +// AuditSinkInterface has methods to work with AuditSink resources. +type AuditSinkInterface interface { + Create(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) + Update(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.AuditSink, error) + List(opts v1.ListOptions) (*v1alpha1.AuditSinkList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) + AuditSinkExpansion +} + +// auditSinks implements AuditSinkInterface +type auditSinks struct { + client rest.Interface +} + +// newAuditSinks returns a AuditSinks +func newAuditSinks(c *AuditregistrationV1alpha1Client) *auditSinks { + return &auditSinks{ + client: c.RESTClient(), + } +} + +// Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. +func (c *auditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Get(). + Resource("auditsinks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of AuditSinks that match those selectors. +func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.AuditSinkList{} + err = c.client.Get(). + Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested auditSinks. +func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *auditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Post(). + Resource("auditsinks"). + Body(auditSink). + Do(). + Into(result) + return +} + +// Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *auditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Put(). + Resource("auditsinks"). + Name(auditSink.Name). + Body(auditSink). + Do(). + Into(result) + return +} + +// Delete takes name of the auditSink and deletes it. Returns an error if one occurs. +func (c *auditSinks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("auditsinks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("auditsinks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched auditSink. +func (c *auditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Patch(pt). + Resource("auditsinks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go index 86602442babdc..df51baa4d4c17 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. 
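// Illustrative sketch, not part of the vendored diff: exercising the remaining
// AuditSink operations generated above (Get, Watch, Delete). The parameter
// names and the immediate watch teardown are assumptions made to keep the
// example short; the method signatures match the AuditSinkInterface in this file.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	auditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1"
)

func inspectThenDeleteAuditSink(client auditregistrationv1alpha1.AuditregistrationV1alpha1Interface, name string) error {
	if _, err := client.AuditSinks().Get(name, metav1.GetOptions{}); err != nil {
		return err
	}
	// Watch goes through the same ListOptions plumbing, including the new
	// TimeoutSeconds-to-Timeout translation.
	w, err := client.AuditSinks().Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	return client.AuditSinks().Delete(name, &metav1.DeleteOptions{})
}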
-package internalversion +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/BUILD.bazel new file mode 100644 index 0000000000000..98e594e89e083 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "fake_auditregistration_client.go", + "fake_auditsink.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake", + importpath = "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/doc.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/doc.go index 1b59c8431cea3..16f44399065ed 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/doc.go @@ -16,4 +16,5 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package internalversion +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditregistration_client.go similarity index 54% rename from vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go rename to vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditregistration_client.go index cef27bd1457cd..c22acabcf47e5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditregistration_client.go @@ -16,33 +16,25 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1beta1 +package fake import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1" rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" ) -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. 
-type ScalesGetter interface { - Scales(namespace string) ScaleInterface +type FakeAuditregistrationV1alpha1 struct { + *testing.Fake } -// ScaleInterface has methods to work with Scale resources. -type ScaleInterface interface { - ScaleExpansion +func (c *FakeAuditregistrationV1alpha1) AuditSinks() v1alpha1.AuditSinkInterface { + return &FakeAuditSinks{c} } -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *AppsV1beta1Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAuditregistrationV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go new file mode 100644 index 0000000000000..d0bb9fd00094f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeAuditSinks implements AuditSinkInterface +type FakeAuditSinks struct { + Fake *FakeAuditregistrationV1alpha1 +} + +var auditsinksResource = schema.GroupVersionResource{Group: "auditregistration.k8s.io", Version: "v1alpha1", Resource: "auditsinks"} + +var auditsinksKind = schema.GroupVersionKind{Group: "auditregistration.k8s.io", Version: "v1alpha1", Kind: "AuditSink"} + +// Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. +func (c *FakeAuditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(auditsinksResource, name), &v1alpha1.AuditSink{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.AuditSink), err +} + +// List takes label and field selectors, and returns the list of AuditSinks that match those selectors. +func (c *FakeAuditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(auditsinksResource, auditsinksKind, opts), &v1alpha1.AuditSinkList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.AuditSinkList{ListMeta: obj.(*v1alpha1.AuditSinkList).ListMeta} + for _, item := range obj.(*v1alpha1.AuditSinkList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested auditSinks. +func (c *FakeAuditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(auditsinksResource, opts)) +} + +// Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *FakeAuditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(auditsinksResource, auditSink), &v1alpha1.AuditSink{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.AuditSink), err +} + +// Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *FakeAuditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(auditsinksResource, auditSink), &v1alpha1.AuditSink{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.AuditSink), err +} + +// Delete takes name of the auditSink and deletes it. Returns an error if one occurs. +func (c *FakeAuditSinks) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(auditsinksResource, name), &v1alpha1.AuditSink{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeAuditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(auditsinksResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.AuditSinkList{}) + return err +} + +// Patch applies the patch and returns the patched auditSink. +func (c *FakeAuditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(auditsinksResource, name, pt, data, subresources...), &v1alpha1.AuditSink{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.AuditSink), err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go index 1b59c8431cea3..f0f5117264137 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go @@ -16,4 +16,6 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package internalversion +package v1alpha1 + +type AuditSinkExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index 7df8343753cad..6a4bf98810d16 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -131,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOption // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &autoscalingv1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 6891b6b63ddba..0e0839fb508ef 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *horizontalPodAutoscalers) Get(name string, options metav1.GetOptions) ( // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). 
VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.Hor // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *metav1.DeleteOpt // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index 2d860341af813..514a787cb1d94 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -131,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOption // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 4ac8cce71bf71..02d5cfb9b60f5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package v2beta1 import ( + "time" + v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. 
func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v2beta1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.Ho // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go index a19b86e2d0370..c0569f00adbd7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go @@ -131,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOption // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta2.HorizontalPodAutoscaler{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index ddabda7e702c3..91a0fa64f9abc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. 
package v2beta2 import ( + "time" + v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v2beta2.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.Ho // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index f12619bb423e1..06dc25c6b48d1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -131,7 +131,7 @@ func (c *FakeJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li // Patch applies the patch and returns the patched job. func (c *FakeJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *batchv1.Job, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, data, subresources...), &batchv1.Job{}) + Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, pt, data, subresources...), &batchv1.Job{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index ba8332a9a25ac..b55c602b34188 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + "time" + v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *jobs) Get(name string, options metav1.GetOptions) (result *v1.Job, err // List takes label and field selectors, and returns the list of Jobs that match those selectors. func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.JobList{} err = c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { // Watch returns a watch.Interface that watches the requested jobs. func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *jobs) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("jobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index 04637c36aa7aa..d89d2fa21d417 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.Cron // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err e // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
Watch() } @@ -150,10 +162,15 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go index d80ef5e67e93c..3985c4037467a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -131,7 +131,7 @@ func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v // Patch applies the patch and returns the patched cronJob. func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, data, subresources...), &v1beta1.CronJob{}) + Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1beta1.CronJob{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go index 4d922f9ae9e25..19123b60411b1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -19,6 +19,8 @@ limitations under the License. package v2alpha1 import ( + "time" + v2alpha1 "k8s.io/api/batch/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.Cro // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v2alpha1.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
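// Illustrative sketch, not part of the vendored diff: the fake Patch methods in
// this update now pass the PatchType through to
// testing.NewPatchSubresourceAction(..., pt, data, ...), so test reactors can
// inspect the patch type as well as the payload. The fake clientset import and
// the CronJob/patch details below follow the usual client-go testing conventions
// and are assumptions as far as this diff is concerned.
package example

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
)

func patchCronJobInTest() error {
	// A real test would normally seed the object via
	// fake.NewSimpleClientset(existingObjects...); left empty here for brevity,
	// in which case the Patch returns a not-found error from the object tracker.
	clientset := fake.NewSimpleClientset()
	patch := []byte(`{"spec":{"suspend":true}}`)
	_, err := clientset.BatchV1beta1().CronJobs("default").Patch("nightly-backup", types.StrategicMergePatchType, patch)
	return err
}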
func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go index 75c0b17338f4f..2195027d27228 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go @@ -131,7 +131,7 @@ func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v // Patch applies the patch and returns the patched cronJob. func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, data, subresources...), &v2alpha1.CronJob{}) + Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v2alpha1.CronJob{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index b39169a8ff6ee..712d3a01afa49 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (re // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.CertificateSigningRequestList{} err = c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1. // Watch returns a watch.Interface that watches the requested certificateSigningRequests. func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptio // DeleteCollection deletes a collection of objects. 
func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("certificatesigningrequests"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index dfd5171951e7c..aa45c88033658 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -123,7 +123,7 @@ func (c *FakeCertificateSigningRequests) DeleteCollection(options *v1.DeleteOpti // Patch applies the patch and returns the patched certificateSigningRequest. func (c *FakeCertificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, data, subresources...), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1beta1.CertificateSigningRequest{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go index 3204e02913ed9..0ebf3bffc2b60 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go @@ -119,7 +119,7 @@ func (c *FakeLeases) DeleteCollection(options *v1.DeleteOptions, listOptions v1. // Patch applies the patch and returns the patched lease. func (c *FakeLeases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, data, subresources...), &v1beta1.Lease{}) + Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1beta1.Lease{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 16277255fa6c0..490d815aa6334 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *leases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, // List takes label and field selectors, and returns the list of Leases that match those selectors. 
func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.LeaseList{} err = c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error // Watch returns a watch.Interface that watches the requested leases. func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *leases) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("leases"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel index 98db57602f609..6fc2c425cb7bf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel @@ -34,8 +34,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/authentication/v1:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index e497661cfbe05..302b2fdc3440f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *componentStatuses) Get(name string, options metav1.GetOptions) (result // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ComponentStatusList{} err = c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -83,10 +90,15 @@ func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentS // Watch returns a watch.Interface that watches the requested componentStatuses. func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *componentStatuses) Delete(name string, options *metav1.DeleteOptions) e // DeleteCollection deletes a collection of objects. func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("componentstatuses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index 0984ae70cc1c4..18ce954ae23d3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *configMaps) Get(name string, options metav1.GetOptions) (result *v1.Con // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ConfigMapList{} err = c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, er // Watch returns a watch.Interface that watches the requested configMaps. func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *configMaps) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index dd8216789bc93..978a2a196cc56 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *endpoints) Get(name string, options metav1.GetOptions) (result *v1.Endp // List takes label and field selectors, and returns the list of Endpoints that match those selectors. func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.EndpointsList{} err = c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err // Watch returns a watch.Interface that watches the requested endpoints. func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *endpoints) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 57d30f9fd49b8..55cfa0901b26c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *events) Get(name string, options metav1.GetOptions) (result *v1.Event, // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) // Watch returns a watch.Interface that watches the requested events. 
func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *events) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD.bazel index 652da0db92586..1d8195d73b0b1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD.bazel @@ -33,8 +33,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/authentication/v1:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index d06023d48266a..18beedc2d3483 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -112,7 +112,7 @@ func (c *FakeComponentStatuses) DeleteCollection(options *v1.DeleteOptions, list // Patch applies the patch and returns the patched componentStatus. func (c *FakeComponentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ComponentStatus, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, data, subresources...), &corev1.ComponentStatus{}) + Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, pt, data, subresources...), &corev1.ComponentStatus{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index b491661f2083f..2361ac3fe9df9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -119,7 +119,7 @@ func (c *FakeConfigMaps) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched configMap. func (c *FakeConfigMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ConfigMap, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, data, subresources...), &corev1.ConfigMap{}) + Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, pt, data, subresources...), &corev1.ConfigMap{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index 2c9f0de0960d1..d521af4083a8b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -119,7 +119,7 @@ func (c *FakeEndpoints) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched endpoints. func (c *FakeEndpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Endpoints, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, data, subresources...), &corev1.Endpoints{}) + Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &corev1.Endpoints{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index 68405a54f1375..3444f4be961a4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -119,7 +119,7 @@ func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1. // Patch applies the patch and returns the patched event. func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &corev1.Event{}) + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &corev1.Event{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go index dd3fb839289a4..4b4c90d7d1961 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" core "k8s.io/client-go/testing" ) @@ -52,10 +53,13 @@ func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error } // PatchWithEventNamespace patches an existing event. Returns the copy of the event the server returns, or an error. +// TODO: Should take a PatchType as an argument probably. func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) { - action := core.NewRootPatchAction(eventsResource, event.Name, data) + // TODO: Should be configurable to support additional patch strategies. 
+ pt := types.StrategicMergePatchType + action := core.NewRootPatchAction(eventsResource, event.Name, pt, data) if c.ns != "" { - action = core.NewPatchAction(eventsResource, c.ns, event.Name, data) + action = core.NewPatchAction(eventsResource, c.ns, event.Name, pt, data) } obj, err := c.Fake.Invokes(action, event) if obj == nil { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index 03c03c5d0e20c..d110031f83255 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -119,7 +119,7 @@ func (c *FakeLimitRanges) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched limitRange. func (c *FakeLimitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.LimitRange, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, data, subresources...), &corev1.LimitRange{}) + Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, pt, data, subresources...), &corev1.LimitRange{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index 12b918af01b6b..21387b5e254fc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -115,7 +115,7 @@ func (c *FakeNamespaces) Delete(name string, options *v1.DeleteOptions) error { // Patch applies the patch and returns the patched namespace. func (c *FakeNamespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Namespace, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, data, subresources...), &corev1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &corev1.Namespace{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index a2bc97b2c6c4a..bcde116a4e42c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -123,7 +123,7 @@ func (c *FakeNodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L // Patch applies the patch and returns the patched node. func (c *FakeNodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Node, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, data, subresources...), &corev1.Node{}) + Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &corev1.Node{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go index eb684fd2951d2..a39022c83f443 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go @@ -18,12 +18,16 @@ package fake import ( "k8s.io/api/core/v1" + types "k8s.io/apimachinery/pkg/types" core "k8s.io/client-go/testing" ) +// TODO: Should take a PatchType as an argument probably. func (c *FakeNodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) { + // TODO: Should be configurable to support additional patch strategies. + pt := types.StrategicMergePatchType obj, err := c.Fake.Invokes( - core.NewRootPatchSubresourceAction(nodesResource, nodeName, data, "status"), &v1.Node{}) + core.NewRootPatchSubresourceAction(nodesResource, nodeName, pt, data, "status"), &v1.Node{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index 71e2f2dc57658..843f323075e64 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -123,7 +123,7 @@ func (c *FakePersistentVolumes) DeleteCollection(options *v1.DeleteOptions, list // Patch applies the patch and returns the patched persistentVolume. func (c *FakePersistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PersistentVolume, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, data, subresources...), &corev1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, pt, data, subresources...), &corev1.PersistentVolume{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index a06eca81cb8f5..d2557c4c83c96 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -131,7 +131,7 @@ func (c *FakePersistentVolumeClaims) DeleteCollection(options *v1.DeleteOptions, // Patch applies the patch and returns the patched persistentVolumeClaim. func (c *FakePersistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PersistentVolumeClaim, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, data, subresources...), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, pt, data, subresources...), &corev1.PersistentVolumeClaim{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index bbf39eafc28ed..2dbecbbaaccf3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -131,7 +131,7 @@ func (c *FakePods) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li // Patch applies the patch and returns the patched pod. func (c *FakePods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Pod, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, data, subresources...), &corev1.Pod{}) + Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &corev1.Pod{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go index 497cc7857059a..9c4b09d3e9f96 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go @@ -26,20 +26,31 @@ import ( func (c *FakePods) Bind(binding *v1.Binding) error { action := core.CreateActionImpl{} action.Verb = "create" + action.Namespace = binding.Namespace action.Resource = podsResource - action.Subresource = "bindings" + action.Subresource = "binding" action.Object = binding _, err := c.Fake.Invokes(action, binding) return err } +func (c *FakePods) GetBinding(name string) (result *v1.Binding, err error) { + obj, err := c.Fake. + Invokes(core.NewGetSubresourceAction(podsResource, c.ns, "binding", name), &v1.Binding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Binding), err +} + func (c *FakePods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { action := core.GenericActionImpl{} action.Verb = "get" action.Namespace = c.ns action.Resource = podsResource - action.Subresource = "logs" + action.Subresource = "log" action.Value = opts _, _ = c.Fake.Invokes(action, &v1.Pod{}) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index ff242f1660f83..307f30594e2a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -119,7 +119,7 @@ func (c *FakePodTemplates) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched podTemplate. func (c *FakePodTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PodTemplate, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, data, subresources...), &corev1.PodTemplate{}) + Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, pt, data, subresources...), &corev1.PodTemplate{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index 64fde0b6cefe5..6de94c14829b9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -19,8 +19,8 @@ limitations under the License. package fake import ( + autoscalingv1 "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -132,7 +132,7 @@ func (c *FakeReplicationControllers) DeleteCollection(options *v1.DeleteOptions, // Patch applies the patch and returns the patched replicationController. func (c *FakeReplicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ReplicationController, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, data, subresources...), &corev1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, pt, data, subresources...), &corev1.ReplicationController{}) if obj == nil { return nil, err @@ -141,23 +141,23 @@ func (c *FakeReplicationControllers) Patch(name string, pt types.PatchType, data } // GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicationControllers) GetScale(replicationControllerName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { +func (c *FakeReplicationControllers) GetScale(replicationControllerName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &v1beta1.Scale{}) + Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &autoscalingv1.Scale{}) if obj == nil { return nil, err } - return obj.(*v1beta1.Scale), err + return obj.(*autoscalingv1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { +func (c *FakeReplicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &v1beta1.Scale{}) + Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) if obj == nil { return nil, err } - return obj.(*v1beta1.Scale), err + return obj.(*autoscalingv1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index 069749ccff02a..b521f7120bde0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -131,7 +131,7 @@ func (c *FakeResourceQuotas) DeleteCollection(options *v1.DeleteOptions, listOpt // Patch applies the patch and returns the patched resourceQuota. func (c *FakeResourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ResourceQuota, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, data, subresources...), &corev1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, pt, data, subresources...), &corev1.ResourceQuota{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index f59ba40bc3d45..47dba9eff4446 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -119,7 +119,7 @@ func (c *FakeSecrets) DeleteCollection(options *v1.DeleteOptions, listOptions v1 // Patch applies the patch and returns the patched secret. func (c *FakeSecrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Secret, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, data, subresources...), &corev1.Secret{}) + Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &corev1.Secret{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index 2ffcdff76a166..a65de49911870 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -123,7 +123,7 @@ func (c *FakeServices) Delete(name string, options *v1.DeleteOptions) error { // Patch applies the patch and returns the patched service. func (c *FakeServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Service, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, data, subresources...), &corev1.Service{}) + Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &corev1.Service{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index 2b2c5a7b22c6d..5b6d8f8be51dc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -119,7 +119,7 @@ func (c *FakeServiceAccounts) DeleteCollection(options *v1.DeleteOptions, listOp // Patch applies the patch and returns the patched serviceAccount. func (c *FakeServiceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ServiceAccount, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, data, subresources...), &corev1.ServiceAccount{}) + Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, pt, data, subresources...), &corev1.ServiceAccount{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index 5b385668b8112..2eeae11a83783 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *limitRanges) Get(name string, options metav1.GetOptions) (result *v1.Li // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.LimitRangeList{} err = c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, // Watch returns a watch.Interface that watches the requested limitRanges. func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *limitRanges) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index e22d07decee47..8a81fe850799d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *namespaces) Get(name string, options metav1.GetOptions) (result *v1.Nam // List takes label and field selectors, and returns the list of Namespaces that match those selectors. func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.NamespaceList{} err = c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, er // Watch returns a watch.Interface that watches the requested namespaces. func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index 5c769c118572d..d19fab8952f83 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *nodes) Get(name string, options metav1.GetOptions) (result *v1.Node, er // List takes label and field selectors, and returns the list of Nodes that match those selectors. func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.NodeList{} err = c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { // Watch returns a watch.Interface that watches the requested nodes. func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *nodes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("nodes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index d5f19aef52299..74514825e9877 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *persistentVolumes) Get(name string, options metav1.GetOptions) (result // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PersistentVolumeList{} err = c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.Persistent // Watch returns a watch.Interface that watches the requested persistentVolumes. func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *persistentVolumes) Delete(name string, options *metav1.DeleteOptions) e // DeleteCollection deletes a collection of objects. func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("persistentvolumes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index d32ae5dfd84f0..410ab37dcba06 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *persistentVolumeClaims) Get(name string, options metav1.GetOptions) (re // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. 
func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PersistentVolumeClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.Persi // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *persistentVolumeClaims) Delete(name string, options *metav1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index b19c5a5c3ea6b..8d6b6e879632d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *pods) Get(name string, options metav1.GetOptions) (result *v1.Pod, err // List takes label and field selectors, and returns the list of Pods that match those selectors. func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PodList{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { // Watch returns a watch.Interface that watches the requested pods. func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *pods) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("pods"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index d644e17d778ce..84d7c98059315 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *podTemplates) Get(name string, options metav1.GetOptions) (result *v1.P // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PodTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList // Watch returns a watch.Interface that watches the requested podTemplates. func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *podTemplates) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 17622f1c26096..dd3182db65bf2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -19,8 +19,10 @@ limitations under the License. 
package v1 import ( + "time" + + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -45,8 +47,8 @@ type ReplicationControllerInterface interface { List(opts metav1.ListOptions) (*v1.ReplicationControllerList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) - GetScale(replicationControllerName string, options metav1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + GetScale(replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) ReplicationControllerExpansion } @@ -80,11 +82,16 @@ func (c *replicationControllers) Get(name string, options metav1.GetOptions) (re // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ReplicationControllerList{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -92,11 +99,16 @@ func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.Repli // Watch returns a watch.Interface that watches the requested replicationControllers. func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -154,10 +166,15 @@ func (c *replicationControllers) Delete(name string, options *metav1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -177,9 +194,9 @@ func (c *replicationControllers) Patch(name string, pt types.PatchType, data []b return } -// GetScale takes name of the replicationController, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} +// GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. 
+func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). @@ -192,8 +209,8 @@ func (c *replicationControllers) GetScale(replicationControllerName string, opti } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} +func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 8b74a4046f0e0..5a178990ecd0d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *resourceQuotas) Get(name string, options metav1.GetOptions) (result *v1 // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ResourceQuotaList{} err = c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuota // Watch returns a watch.Interface that watches the requested resourceQuotas. func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *resourceQuotas) Delete(name string, options *metav1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 4ea9796b63dce..85c143b173dda 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *secrets) Get(name string, options metav1.GetOptions) (result *v1.Secret // List takes label and field selectors, and returns the list of Secrets that match those selectors. func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.SecretList{} err = c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err erro // Watch returns a watch.Interface that watches the requested secrets. func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *secrets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("secrets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index 6c42ca87a8bd2..b0e09413efbbb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Servi // List takes label and field selectors, and returns the list of Services that match those selectors. func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ServiceList{} err = c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err er // Watch returns a watch.Interface that watches the requested services. 
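The Watch variants get the same wiring as List: the generated method still forces opts.Watch to true, and a caller-supplied TimeoutSeconds now also caps the client-side connection. A minimal watch loop against the services client, assuming an existing clientset (names are illustrative):

package watchsketch

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchServices prints service events for up to five minutes; the loop ends
// when the timeout elapses or the server closes the watch.
func watchServices(clientset *kubernetes.Clientset, ns string) error {
	timeoutSeconds := int64(300)
	w, err := clientset.CoreV1().Services(ns).Watch(metav1.ListOptions{
		TimeoutSeconds: &timeoutSeconds,
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		fmt.Println(event.Type)
	}
	return nil
}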
func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index f3ab7eb878c40..50af6a21c9bc0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *serviceAccounts) Get(name string, options metav1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ServiceAccountList{} err = c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccou // Watch returns a watch.Interface that watches the requested serviceAccounts. func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *serviceAccounts) Delete(name string, options *metav1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index af7d060d591e5..143281b25cc94 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, // List takes label and field selectors, and returns the list of Events that match those selectors. 
func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error // Watch returns a watch.Interface that watches the requested events. func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *events) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go index b210e40a041c8..ef76ec1318173 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -119,7 +119,7 @@ func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1. // Patch applies the patch and returns the patched event. func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &v1beta1.Event{}) + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1beta1.Event{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel index 627823b5ba9c8..3c5b3cbb3816c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel @@ -12,17 +12,13 @@ go_library( "ingress.go", "podsecuritypolicy.go", "replicaset.go", - "scale.go", - "scale_expansion.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1", importpath = "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index 85294be4b98f2..93b1ae9b6dd7f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.Da // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, e // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). 
Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 89183d2853e0e..5557b9f2b1e85 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -79,11 +81,16 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -91,11 +98,16 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -153,10 +165,15 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 1961ffc7cda0a..0e9edf5cce60f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -32,7 +32,6 @@ type ExtensionsV1beta1Interface interface { IngressesGetter PodSecurityPoliciesGetter ReplicaSetsGetter - ScalesGetter } // ExtensionsV1beta1Client is used to interact with features provided by the extensions group. @@ -60,10 +59,6 @@ func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterf return newReplicaSets(c, namespace) } -func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - // NewForConfig creates a new ExtensionsV1beta1Client for the given config. 
func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/BUILD.bazel index 036cc70c6f8c0..c10131ef6d647 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/BUILD.bazel @@ -11,8 +11,6 @@ go_library( "fake_ingress.go", "fake_podsecuritypolicy.go", "fake_replicaset.go", - "fake_scale.go", - "fake_scale_expansion.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", importpath = "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index 3a760b3175ea6..4c98660607c22 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -131,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &v1beta1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.DaemonSet{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index f032a5563897f..7b7df45cc344e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -131,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go index 1aba34f9dcb80..0282c0b49908b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -48,10 +48,6 @@ func (c *FakeExtensionsV1beta1) ReplicaSets(namespace string) v1beta1.ReplicaSet return &FakeReplicaSets{c, namespace} } -func (c *FakeExtensionsV1beta1) Scales(namespace string) v1beta1.ScaleInterface { - return &FakeScales{c, namespace} -} - // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeExtensionsV1beta1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index 55257a88a2875..01c2521401ffa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -131,7 +131,7 @@ func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched ingress. func (c *FakeIngresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, data, subresources...), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go index 70b5dac281932..b97a34416e3fe 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go @@ -112,7 +112,7 @@ func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched podSecurityPolicy. func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, data, subresources...), &v1beta1.PodSecurityPolicy{}) + Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index 2ab8f244f5710..7ed16af904669 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -131,7 +131,7 @@ func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &v1beta1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta1.ReplicaSet{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go deleted file mode 100644 index 02c4d0bab73ff..0000000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -// FakeScales implements ScaleInterface -type FakeScales struct { - Fake *FakeExtensionsV1beta1 - ns string -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go deleted file mode 100644 index 1f1d49ba1a9ff..0000000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime/schema" - core "k8s.io/client-go/testing" -) - -func (c *FakeScales) Get(kind string, name string) (result *v1beta1.Scale, err error) { - action := core.GetActionImpl{} - action.Verb = "get" - action.Namespace = c.ns - action.Resource = schema.GroupVersionResource{Resource: kind} - action.Subresource = "scale" - action.Name = name - obj, err := c.Fake.Invokes(action, &v1beta1.Scale{}) - result = obj.(*v1beta1.Scale) - return -} - -func (c *FakeScales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - action := core.UpdateActionImpl{} - action.Verb = "update" - action.Namespace = c.ns - action.Resource = schema.GroupVersionResource{Resource: kind} - action.Subresource = "scale" - action.Object = scale - obj, err := c.Fake.Invokes(action, scale) - result = obj.(*v1beta1.Scale) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index f8b664cbd1e4c..4da51c368585f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ing // List takes label and field selectors, and returns the list of Ingresses that match those selectors. func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.IngressList{} err = c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err // Watch returns a watch.Interface that watches the requested ingresses. func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go index 8099d77307d67..a947a54a6f429 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 7e61fa2d12f80..444029058b49e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -79,11 +81,16 @@ func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.R // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -91,11 +98,16 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -153,10 +165,15 @@ func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go deleted file mode 100644 index 6ee677acd290d..0000000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. -type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *ExtensionsV1beta1Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go deleted file mode 100644 index c9733cb28d2e7..0000000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. -type ScaleExpansion interface { - Get(kind string, name string) (*v1beta1.Scale, error) - Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) -} - -// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. -func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) - - err = c.client.Get(). - Namespace(c.ns). - Resource(resource.Resource). - Name(name). - SubResource("scale"). - Do(). - Into(result) - return -} - -func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) - - err = c.client.Put(). - Namespace(scale.Namespace). - Resource(resource.Resource). - Name(scale.Name). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index 7be202298de58..58667c481a82d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -119,7 +119,7 @@ func (c *FakeNetworkPolicies) DeleteCollection(options *v1.DeleteOptions, listOp // Patch applies the patch and returns the patched networkPolicy. func (c *FakeNetworkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *networkingv1.NetworkPolicy, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, data, subresources...), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &networkingv1.NetworkPolicy{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index d8f0a6b47e96a..3f39be957d8a8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *networkPolicies) Get(name string, options metav1.GetOptions) (result *v // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. 
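As in the other generated clients, List now copies ListOptions.TimeoutSeconds into a client-side request timeout in addition to sending it to the server. A minimal sketch against the networking/v1 client, assuming an existing clientset (names are illustrative):

package listsketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// countNetworkPolicies lists policies in one namespace, bounding the request
// to 30 seconds on the server and, with this change, on the client side too.
func countNetworkPolicies(clientset *kubernetes.Clientset, ns string) (int, error) {
	timeoutSeconds := int64(30)
	list, err := clientset.NetworkingV1().NetworkPolicies(ns).List(metav1.ListOptions{
		TimeoutSeconds: &timeoutSeconds,
	})
	if err != nil {
		return 0, err
	}
	return len(list.Items), nil
}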
func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.NetworkPolicyList{} err = c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolic // Watch returns a watch.Interface that watches the requested networkPolicies. func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *networkPolicies) Delete(name string, options *metav1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go index 2f0d8e95370a8..f3b5e93ab0d21 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go @@ -26,8 +26,9 @@ func (c *FakeEvictions) Evict(eviction *policy.Eviction) error { action := core.GetActionImpl{} action.Verb = "post" action.Namespace = c.ns - action.Resource = schema.GroupVersionResource{Group: "", Version: "", Resource: "pods"} + action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} action.Subresource = "eviction" + action.Name = eviction.Name _, err := c.Fake.Invokes(action, eviction) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index 3f2e78b3109f1..5bfbbca47f218 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -131,7 +131,7 @@ func (c *FakePodDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, l // Patch applies the patch and returns the patched podDisruptionBudget. func (c *FakePodDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, data, subresources...), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1beta1.PodDisruptionBudget{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go index 0df9aa15f981f..32d1989f33f60 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go @@ -112,7 +112,7 @@ func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched podSecurityPolicy. func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, data, subresources...), &v1beta1.PodSecurityPolicy{}) + Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index a11f27eb2579d..864af9a262bb9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result * // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PodDisruptionBudgetList{} err = c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDis // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) er // DeleteCollection deletes a collection of objects. 
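On the fake side, NewPatchSubresourceAction and NewRootPatchSubresourceAction now take the patch type explicitly, so hand-rolled test helpers that build these actions need the extra argument. A hedged sketch of the updated constructor call; the GroupVersionResource and patch payload are placeholders:

package faketest

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	k8stesting "k8s.io/client-go/testing"
)

// newPDBPatchAction builds the action the generated fake client now records
// for a strategic-merge patch of a namespaced PodDisruptionBudget.
func newPDBPatchAction(ns, name string, patch []byte) k8stesting.PatchActionImpl {
	gvr := schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"}
	return k8stesting.NewPatchSubresourceAction(gvr, ns, name, types.StrategicMergePatchType, patch)
}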
func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go index 355be1e9c7fb3..d02096d747ae6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index c4299d4c68cae..0a47c44115030 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoles) Get(name string, options metav1.GetOptions) (result *v1.C // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. 
func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoles) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index 30c0469a4fc4d..c16ebc31222b4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoleBindings) Get(name string, options metav1.GetOptions) (resul // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterR // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoleBindings) Delete(name string, options *metav1.DeleteOptions) // DeleteCollection deletes a collection of objects. 
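DeleteCollection keeps its two-options shape, a DeleteOptions body plus a ListOptions selector, and with this change the selector's TimeoutSeconds also bounds the DELETE request itself. A minimal sketch using the cluster-scoped RBAC client, assuming an existing clientset (names are illustrative):

package deletesketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteLabelledClusterRoleBindings removes every ClusterRoleBinding matching
// the label selector, giving the collection delete at most 60 seconds.
func deleteLabelledClusterRoleBindings(clientset *kubernetes.Clientset, selector string) error {
	timeoutSeconds := int64(60)
	return clientset.RbacV1().ClusterRoleBindings().DeleteCollection(
		&metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: selector, TimeoutSeconds: &timeoutSeconds},
	)
}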
func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go index d93ac8294422f..d57f33939021f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.ClusterRole, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &rbacv1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &rbacv1.ClusterRole{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go index a8b2b57ffd723..878473ef35d1e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched clusterRoleBinding. func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &rbacv1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &rbacv1.ClusterRoleBinding{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go index f048bbdfb2d88..78ef3192f3dbb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go @@ -119,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.Role, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &rbacv1.Role{}) + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &rbacv1.Role{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go index c71635fce4e63..6c344cadff96f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go @@ -119,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.RoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &rbacv1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &rbacv1.RoleBinding{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index 81ea12a9ff51f..a17d791fff243 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roles) Get(name string, options metav1.GetOptions) (result *v1.Role, er // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roles) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index 17c6f9913b01b..c87e457188ef8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roleBindings) Get(name string, options metav1.GetOptions) (result *v1.R // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roleBindings) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index 37a54576231b3..77e66877e725b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleLi // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 6050789066d94..0d1b9d2051eb5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.Cluste // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go index 13fbce4e72a56..d2d1b1c74c832 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1alpha1.ClusterRole{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go index 5076543d9f42e..3e23e5f657068 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched clusterRoleBinding. func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1alpha1.ClusterRoleBinding{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go index 24d8efee3c1df..7bd52373faca3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -119,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &v1alpha1.Role{}) + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Role{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go index cb01ef99db700..0150503115629 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -119,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &v1alpha1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.RoleBinding{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index aa6954bb579d8..4a4b67240b3c7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index 0941b8e867137..bf4e5a10efb3c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingLi // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index bac951c876c32..21d3cab37339c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleLis // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index 96c91de6e2c8b..47eb9e4e77b15 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.Cluster // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go index 62a832197e78f..2dbc3f6166e8d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1beta1.ClusterRole{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go index c9ab472696415..14e20bc28c1ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched clusterRoleBinding. func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1beta1.ClusterRoleBinding{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index 45b07a001ed2d..e31768e4e506d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -119,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &v1beta1.Role{}) + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1beta1.Role{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index 1efd400056028..06b93c93f66c4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -119,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &v1beta1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1beta1.RoleBinding{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index 66f382c07c821..2b61aad5231b2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, e // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 67d3d331bc264..0bd118fdfeb80 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingLis // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go index 8ab4421a97d5f..e592ed137faa4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go @@ -112,7 +112,7 @@ func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOp // Patch applies the patch and returns the patched priorityClass. func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, data, subresources...), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1alpha1.PriorityClass{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index 6845d25c3859d..29d646fb1f03e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alp // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityCl // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go index e234fec66c1e5..44ce64b5ce063 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go @@ -112,7 +112,7 @@ func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOp // Patch applies the patch and returns the patched priorityClass. func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, data, subresources...), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1beta1.PriorityClass{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 57b9766e42ef6..5e402f8e342ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1bet // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityCla // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go index 90eaccec5a5be..273a027fadf02 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go @@ -119,7 +119,7 @@ func (c *FakePodPresets) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched podPreset. func (c *FakePodPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podpresetsResource, c.ns, name, data, subresources...), &v1alpha1.PodPreset{}) + Invokes(testing.NewPatchSubresourceAction(podpresetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.PodPreset{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go index f000ae486cdf4..8fd6adc56b6bb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/settings/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.P // List takes label and field selectors, and returns the list of PodPresets that match those selectors. func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.PodPresetList{} err = c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, // Watch returns a watch.Interface that watches the requested podPresets. func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel index da165100f730c..69dbfd8c3bd8b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel @@ -7,6 +7,7 @@ go_library( "generated_expansion.go", "storage_client.go", "storageclass.go", + "volumeattachment.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/storage/v1", importpath = "k8s.io/client-go/kubernetes/typed/storage/v1", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD.bazel index feee2636aeb00..795ce14751be4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD.bazel @@ -6,6 +6,7 @@ go_library( "doc.go", "fake_storage_client.go", "fake_storageclass.go", + "fake_volumeattachment.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake", importpath = "k8s.io/client-go/kubernetes/typed/storage/v1/fake", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go index fc6f98cf6ace2..967a528500e7a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go @@ -32,6 +32,10 @@ func (c *FakeStorageV1) StorageClasses() v1.StorageClassInterface { return &FakeStorageClasses{c} } +func (c *FakeStorageV1) VolumeAttachments() v1.VolumeAttachmentInterface { + return &FakeVolumeAttachments{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeStorageV1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go index 37488a2d7ad48..c7531d8793c12 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -112,7 +112,7 @@ func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOpt // Patch applies the patch and returns the patched storageClass. func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.StorageClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &storagev1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &storagev1.StorageClass{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go new file mode 100644 index 0000000000000..58e09da46be8d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go @@ -0,0 +1,131 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + storagev1 "k8s.io/api/storage/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeAttachments implements VolumeAttachmentInterface +type FakeVolumeAttachments struct { + Fake *FakeStorageV1 +} + +var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "volumeattachments"} + +var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "VolumeAttachment"} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result *storagev1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &storagev1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.VolumeAttachment), err +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *storagev1.VolumeAttachmentList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &storagev1.VolumeAttachmentList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &storagev1.VolumeAttachmentList{ListMeta: obj.(*storagev1.VolumeAttachmentList).ListMeta} + for _, item := range obj.(*storagev1.VolumeAttachmentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *FakeVolumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *FakeVolumeAttachments) Create(volumeAttachment *storagev1.VolumeAttachment) (result *storagev1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &storagev1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.VolumeAttachment), err +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. 
+func (c *FakeVolumeAttachments) Update(volumeAttachment *storagev1.VolumeAttachment) (result *storagev1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &storagev1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.VolumeAttachment), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *storagev1.VolumeAttachment) (*storagev1.VolumeAttachment, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &storagev1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.VolumeAttachment), err +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *FakeVolumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &storagev1.VolumeAttachment{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOptions) + + _, err := c.Fake.Invokes(action, &storagev1.VolumeAttachmentList{}) + return err +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &storagev1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.VolumeAttachment), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go index 2bea7ec7fd98c..ccac16114c874 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -19,3 +19,5 @@ limitations under the License. package v1 type StorageClassExpansion interface{} + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index ac48f49169921..92378cf7f4967 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -28,6 +28,7 @@ import ( type StorageV1Interface interface { RESTClient() rest.Interface StorageClassesGetter + VolumeAttachmentsGetter } // StorageV1Client is used to interact with features provided by the storage.k8s.io group. @@ -39,6 +40,10 @@ func (c *StorageV1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } +func (c *StorageV1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + // NewForConfig creates a new StorageV1Client for the given config. 
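// Editor's aside (illustrative sketch, not part of the generated code above):
// with FakeStorageV1.VolumeAttachments and StorageV1Client.VolumeAttachments in
// place, the new typed client can be exercised end-to-end through the fake
// clientset. The test name and the seeded object below are hypothetical.
package example_test

import (
	"testing"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeVolumeAttachments(t *testing.T) {
	pv := "pv-1"
	va := &storagev1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "va-1", Labels: map[string]string{"attacher": "csi"}},
		Spec: storagev1.VolumeAttachmentSpec{
			Attacher: "csi.example.com",
			NodeName: "node-1",
			Source:   storagev1.VolumeAttachmentSource{PersistentVolumeName: &pv},
		},
	}

	// Seed the fake object tracker; Get and List are served by the actions above.
	cs := fake.NewSimpleClientset(va)

	got, err := cs.StorageV1().VolumeAttachments().Get("va-1", metav1.GetOptions{})
	if err != nil || got.Spec.NodeName != "node-1" {
		t.Fatalf("unexpected Get result: %#v, %v", got, err)
	}

	// The generated fake List filters client-side by label selector
	// (ExtractFromListOptions plus label.Matches), so this returns one item.
	list, err := cs.StorageV1().VolumeAttachments().List(metav1.ListOptions{LabelSelector: "attacher=csi"})
	if err != nil || len(list.Items) != 1 {
		t.Fatalf("unexpected List result: %#v, %v", list, err)
	}
}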
func NewForConfig(c *rest.Config) (*StorageV1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 0f7f57f05fbe1..3f4c48f0a0c48 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *storageClasses) Get(name string, options metav1.GetOptions) (result *v1 // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassL // Watch returns a watch.Interface that watches the requested storageClasses. func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *storageClasses) Delete(name string, options *metav1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go similarity index 61% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go rename to vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go index 2ce2c9784cf3d..0f45097b2009d 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go @@ -16,15 +16,17 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package internalversion +package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "time" + + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" - storage "k8s.io/kubernetes/pkg/apis/storage" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -35,15 +37,15 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. type VolumeAttachmentInterface interface { - Create(*storage.VolumeAttachment) (*storage.VolumeAttachment, error) - Update(*storage.VolumeAttachment) (*storage.VolumeAttachment, error) - UpdateStatus(*storage.VolumeAttachment) (*storage.VolumeAttachment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*storage.VolumeAttachment, error) - List(opts v1.ListOptions) (*storage.VolumeAttachmentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storage.VolumeAttachment, err error) + Create(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + Update(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + UpdateStatus(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.VolumeAttachment, error) + List(opts metav1.ListOptions) (*v1.VolumeAttachmentList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) VolumeAttachmentExpansion } @@ -53,15 +55,15 @@ type volumeAttachments struct { } // newVolumeAttachments returns a VolumeAttachments -func newVolumeAttachments(c *StorageClient) *volumeAttachments { +func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { return &volumeAttachments{ client: c.RESTClient(), } } // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *storage.VolumeAttachment, err error) { - result = &storage.VolumeAttachment{} +func (c *volumeAttachments) Get(name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} err = c.client.Get(). Resource("volumeattachments"). Name(name). @@ -72,28 +74,38 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *sto } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(opts v1.ListOptions) (result *storage.VolumeAttachmentList, err error) { - result = &storage.VolumeAttachmentList{} +func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.VolumeAttachmentList{} err = c.client.Get(). 
Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return } // Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(volumeAttachment *storage.VolumeAttachment) (result *storage.VolumeAttachment, err error) { - result = &storage.VolumeAttachment{} +func (c *volumeAttachments) Create(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} err = c.client.Post(). Resource("volumeattachments"). Body(volumeAttachment). @@ -103,8 +115,8 @@ func (c *volumeAttachments) Create(volumeAttachment *storage.VolumeAttachment) ( } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(volumeAttachment *storage.VolumeAttachment) (result *storage.VolumeAttachment, err error) { - result = &storage.VolumeAttachment{} +func (c *volumeAttachments) Update(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). @@ -117,8 +129,8 @@ func (c *volumeAttachments) Update(volumeAttachment *storage.VolumeAttachment) ( // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(volumeAttachment *storage.VolumeAttachment) (result *storage.VolumeAttachment, err error) { - result = &storage.VolumeAttachment{} +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} err = c.client.Put(). Resource("volumeattachments"). Name(volumeAttachment.Name). @@ -130,7 +142,7 @@ func (c *volumeAttachments) UpdateStatus(volumeAttachment *storage.VolumeAttachm } // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { +func (c *volumeAttachments) Delete(name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Resource("volumeattachments"). Name(name). @@ -140,18 +152,23 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error } // DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *volumeAttachments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). 
Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() } // Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storage.VolumeAttachment, err error) { - result = &storage.VolumeAttachment{} +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} err = c.client.Patch(pt). Resource("volumeattachments"). SubResource(subresources...). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go index af04b681c4f08..86f53e2d4dc05 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go @@ -123,7 +123,7 @@ func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, list // Patch applies the patch and returns the patched volumeAttachment. func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, data, subresources...), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1alpha1.VolumeAttachment{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index e6af001859400..7fef94e8d886e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1a // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAt // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
Watch() } @@ -141,9 +153,14 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index cbfbab1a35a35..9fc8ca991e35e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -112,7 +112,7 @@ func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOpt // Patch applies the patch and returns the patched storageClass. func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &v1beta1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1beta1.StorageClass{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go index 04c0c463adca3..043098f45579d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go @@ -123,7 +123,7 @@ func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, list // Patch applies the patch and returns the patched volumeAttachment. func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, data, subresources...), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1beta1.VolumeAttachment{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index fbe1fd4c215b2..8a8f38916194d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. 
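The fake-client hunks above pass the `types.PatchType` through to `NewRootPatchSubresourceAction`, so recorded patch actions now carry the patch strategy the caller used. A hypothetical test sketch, assuming the testing package's `PatchAction` exposes `GetPatchType()` at this vintage; the test name, object name, and patch payload are made up:

```go
// Hypothetical test: assert that the fake clientset records the PatchType.
package storage_test

import (
	"testing"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func TestFakePatchRecordsPatchType(t *testing.T) {
	cs := fake.NewSimpleClientset()
	// The object does not exist, so the call errors, but the action is still recorded.
	_, _ = cs.StorageV1beta1().StorageClasses().Patch("fast", types.MergePatchType, []byte(`{"metadata":{"labels":{"tier":"ssd"}}}`))

	for _, action := range cs.Fake.Actions() {
		if patch, ok := action.(k8stesting.PatchAction); ok && patch.GetPatchType() != types.MergePatchType {
			t.Fatalf("expected %v, got %v", types.MergePatchType, patch.GetPatchType())
		}
	}
}
```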
func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClass // Watch returns a watch.Interface that watches the requested storageClasses. func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index 5cd2d3919f821..d319407f2723d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1b // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAtt // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. 
func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD.bazel index 74914feaf9930..8d2d4329eb18e 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD.bazel +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD.bazel @@ -6,7 +6,6 @@ go_library( "controllerrevision.go", "deployment.go", "expansion_generated.go", - "scale.go", "statefulset.go", "statefulset_expansion.go", ], diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go index 8f8d08434d8e2..c73cf98c7a591 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go @@ -33,11 +33,3 @@ type DeploymentListerExpansion interface{} // DeploymentNamespaceListerExpansion allows custom methods to be added to // DeploymentNamespaceLister. type DeploymentNamespaceListerExpansion interface{} - -// ScaleListerExpansion allows custom methods to be added to -// ScaleLister. -type ScaleListerExpansion interface{} - -// ScaleNamespaceListerExpansion allows custom methods to be added to -// ScaleNamespaceLister. -type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go deleted file mode 100644 index ef8a2630ec31c..0000000000000 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ScaleLister helps list Scales. -type ScaleLister interface { - // List lists all Scales in the indexer. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Scales returns an object that can list and get Scales. - Scales(namespace string) ScaleNamespaceLister - ScaleListerExpansion -} - -// scaleLister implements the ScaleLister interface. -type scaleLister struct { - indexer cache.Indexer -} - -// NewScaleLister returns a new ScaleLister. -func NewScaleLister(indexer cache.Indexer) ScaleLister { - return &scaleLister{indexer: indexer} -} - -// List lists all Scales in the indexer. 
-func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Scales returns an object that can list and get Scales. -func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { - return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScaleNamespaceLister helps list and get Scales. -type ScaleNamespaceLister interface { - // List lists all Scales in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Get retrieves the Scale from the indexer for a given namespace and name. - Get(name string) (*v1beta1.Scale, error) - ScaleNamespaceListerExpansion -} - -// scaleNamespaceLister implements the ScaleNamespaceLister -// interface. -type scaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Scales in the indexer for a given namespace. -func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Get retrieves the Scale from the indexer for a given namespace and name. -func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) - } - return obj.(*v1beta1.Scale), nil -} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD.bazel b/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD.bazel index f42ebfd887aa2..b83356b2cb418 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD.bazel +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD.bazel @@ -11,7 +11,6 @@ go_library( "expansion_generated.go", "replicaset.go", "replicaset_expansion.go", - "scale.go", "statefulset.go", "statefulset_expansion.go", ], diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go index d468f38e7c6d5..bac6ccb9a7a37 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go @@ -25,11 +25,3 @@ type ControllerRevisionListerExpansion interface{} // ControllerRevisionNamespaceListerExpansion allows custom methods to be added to // ControllerRevisionNamespaceLister. type ControllerRevisionNamespaceListerExpansion interface{} - -// ScaleListerExpansion allows custom methods to be added to -// ScaleLister. -type ScaleListerExpansion interface{} - -// ScaleNamespaceListerExpansion allows custom methods to be added to -// ScaleNamespaceLister. -type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go deleted file mode 100644 index d89329864ae74..0000000000000 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ScaleLister helps list Scales. -type ScaleLister interface { - // List lists all Scales in the indexer. - List(selector labels.Selector) (ret []*v1beta2.Scale, err error) - // Scales returns an object that can list and get Scales. - Scales(namespace string) ScaleNamespaceLister - ScaleListerExpansion -} - -// scaleLister implements the ScaleLister interface. -type scaleLister struct { - indexer cache.Indexer -} - -// NewScaleLister returns a new ScaleLister. -func NewScaleLister(indexer cache.Indexer) ScaleLister { - return &scaleLister{indexer: indexer} -} - -// List lists all Scales in the indexer. -func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Scale)) - }) - return ret, err -} - -// Scales returns an object that can list and get Scales. -func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { - return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScaleNamespaceLister helps list and get Scales. -type ScaleNamespaceLister interface { - // List lists all Scales in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta2.Scale, err error) - // Get retrieves the Scale from the indexer for a given namespace and name. - Get(name string) (*v1beta2.Scale, error) - ScaleNamespaceListerExpansion -} - -// scaleNamespaceLister implements the ScaleNamespaceLister -// interface. -type scaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Scales in the indexer for a given namespace. -func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Scale)) - }) - return ret, err -} - -// Get retrieves the Scale from the indexer for a given namespace and name. 
-func (s scaleNamespaceLister) Get(name string) (*v1beta2.Scale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("scale"), name) - } - return obj.(*v1beta2.Scale), nil -} diff --git a/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/BUILD.bazel new file mode 100644 index 0000000000000..54c53576285d7 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "auditsink.go", + "expansion_generated.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1", + importpath = "k8s.io/client-go/listers/auditregistration/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go new file mode 100644 index 0000000000000..3ae4528c8c357 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// AuditSinkLister helps list AuditSinks. +type AuditSinkLister interface { + // List lists all AuditSinks in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.AuditSink, err error) + // Get retrieves the AuditSink from the index for a given name. + Get(name string) (*v1alpha1.AuditSink, error) + AuditSinkListerExpansion +} + +// auditSinkLister implements the AuditSinkLister interface. +type auditSinkLister struct { + indexer cache.Indexer +} + +// NewAuditSinkLister returns a new AuditSinkLister. +func NewAuditSinkLister(indexer cache.Indexer) AuditSinkLister { + return &auditSinkLister{indexer: indexer} +} + +// List lists all AuditSinks in the indexer. +func (s *auditSinkLister) List(selector labels.Selector) (ret []*v1alpha1.AuditSink, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.AuditSink)) + }) + return ret, err +} + +// Get retrieves the AuditSink from the index for a given name. 
+func (s *auditSinkLister) Get(name string) (*v1alpha1.AuditSink, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("auditsink"), name) + } + return obj.(*v1alpha1.AuditSink), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go similarity index 75% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go rename to vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go index 2560f4a32d14b..533dd0631f96f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go +++ b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go @@ -14,12 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. +// Code generated by lister-gen. DO NOT EDIT. -package internalversion +package v1alpha1 -type DaemonSetExpansion interface{} - -type IngressExpansion interface{} - -type ReplicaSetExpansion interface{} +// AuditSinkListerExpansion allows custom methods to be added to +// AuditSinkLister. +type AuditSinkListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD.bazel index b99a48ba8015c..11a7cceaa84f6 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD.bazel +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD.bazel @@ -12,7 +12,6 @@ go_library( "podsecuritypolicy.go", "replicaset.go", "replicaset_expansion.go", - "scale.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/listers/extensions/v1beta1", importpath = "k8s.io/client-go/listers/extensions/v1beta1", diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go index b5ee8a49230f9..d5c2a7a7d2213 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -29,11 +29,3 @@ type IngressNamespaceListerExpansion interface{} // PodSecurityPolicyListerExpansion allows custom methods to be added to // PodSecurityPolicyLister. type PodSecurityPolicyListerExpansion interface{} - -// ScaleListerExpansion allows custom methods to be added to -// ScaleLister. -type ScaleListerExpansion interface{} - -// ScaleNamespaceListerExpansion allows custom methods to be added to -// ScaleNamespaceLister. -type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go deleted file mode 100644 index 527d4be424689..0000000000000 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ScaleLister helps list Scales. -type ScaleLister interface { - // List lists all Scales in the indexer. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Scales returns an object that can list and get Scales. - Scales(namespace string) ScaleNamespaceLister - ScaleListerExpansion -} - -// scaleLister implements the ScaleLister interface. -type scaleLister struct { - indexer cache.Indexer -} - -// NewScaleLister returns a new ScaleLister. -func NewScaleLister(indexer cache.Indexer) ScaleLister { - return &scaleLister{indexer: indexer} -} - -// List lists all Scales in the indexer. -func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Scales returns an object that can list and get Scales. -func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { - return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScaleNamespaceLister helps list and get Scales. -type ScaleNamespaceLister interface { - // List lists all Scales in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Get retrieves the Scale from the indexer for a given namespace and name. - Get(name string) (*v1beta1.Scale, error) - ScaleNamespaceListerExpansion -} - -// scaleNamespaceLister implements the ScaleNamespaceLister -// interface. -type scaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Scales in the indexer for a given namespace. -func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Get retrieves the Scale from the indexer for a given namespace and name. 
-func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) - } - return obj.(*v1beta1.Scale), nil -} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD.bazel index 8d47d0814f946..e4025ab883c3b 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD.bazel +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD.bazel @@ -13,12 +13,12 @@ go_library( importpath = "k8s.io/client-go/listers/policy/v1beta1", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go index c0ab9d3ed4ccb..d07d11a98dfc3 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go @@ -19,11 +19,11 @@ package v1beta1 import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/klog" ) // PodDisruptionBudgetListerExpansion allows custom methods to be added to @@ -54,7 +54,7 @@ func (s *podDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]* pdb := list[i] selector, err = metav1.LabelSelectorAsSelector(pdb.Spec.Selector) if err != nil { - glog.Warningf("invalid selector: %v", err) + klog.Warningf("invalid selector: %v", err) // TODO(mml): add an event to the PDB continue } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/BUILD.bazel b/vendor/k8s.io/client-go/listers/storage/v1/BUILD.bazel index a31a888c5f8d6..31da477590792 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/BUILD.bazel +++ b/vendor/k8s.io/client-go/listers/storage/v1/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "expansion_generated.go", "storageclass.go", + "volumeattachment.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/listers/storage/v1", importpath = "k8s.io/client-go/listers/storage/v1", diff --git a/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go index d932470649cc3..9d7d8887265ea 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go @@ -21,3 +21,7 @@ package v1 // StorageClassListerExpansion allows custom methods to be added to // StorageClassLister. type StorageClassListerExpansion interface{} + +// VolumeAttachmentListerExpansion allows custom methods to be added to +// VolumeAttachmentLister. 
+type VolumeAttachmentListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go new file mode 100644 index 0000000000000..14888812ec6ad --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeAttachmentLister helps list VolumeAttachments. +type VolumeAttachmentLister interface { + // List lists all VolumeAttachments in the indexer. + List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) + // Get retrieves the VolumeAttachment from the index for a given name. + Get(name string) (*v1.VolumeAttachment, error) + VolumeAttachmentListerExpansion +} + +// volumeAttachmentLister implements the VolumeAttachmentLister interface. +type volumeAttachmentLister struct { + indexer cache.Indexer +} + +// NewVolumeAttachmentLister returns a new VolumeAttachmentLister. +func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { + return &volumeAttachmentLister{indexer: indexer} +} + +// List lists all VolumeAttachments in the indexer. +func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.VolumeAttachment)) + }) + return ret, err +} + +// Get retrieves the VolumeAttachment from the index for a given name. +func (s *volumeAttachmentLister) Get(name string) (*v1.VolumeAttachment, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("volumeattachment"), name) + } + return obj.(*v1.VolumeAttachment), nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS new file mode 100644 index 0000000000000..3b7ea1b131f25 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS @@ -0,0 +1,7 @@ +# approval on api packages bubbles to api-approvers +reviewers: +- sig-auth-authenticators-approvers +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go index d06482d554d25..b99459757e563 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go @@ -16,4 +16,5 @@ limitations under the License. 
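The new `listers/storage/v1` VolumeAttachment lister above mirrors the existing v1beta1 one; VolumeAttachment is cluster-scoped, so the lister exposes `Get(name)` directly rather than a namespaced sub-lister. A small sketch of driving it from a plain `cache.Indexer` (in a real controller the indexer would come from a shared informer; the object name is made up):

```go
// Sketch: using the newly generated v1 VolumeAttachment lister.
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	storagelisters "k8s.io/client-go/listers/storage/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&storagev1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-attach-example"},
	})

	lister := storagelisters.NewVolumeAttachmentLister(indexer)
	all, _ := lister.List(labels.Everything())
	fmt.Printf("indexed %d VolumeAttachments\n", len(all))

	if va, err := lister.Get("csi-attach-example"); err == nil {
		fmt.Println("got:", va.Name)
	}
}
```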
// +k8s:deepcopy-gen=package // +groupName=client.authentication.k8s.io + package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go index 016adb28a743d..19ab7761400fc 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io + package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go index fbcd9b7fea21d..22d1c588bc705 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io + package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go index 30399fb020161..05e997e133523 100644 --- a/vendor/k8s.io/client-go/pkg/version/doc.go +++ b/vendor/k8s.io/client-go/pkg/version/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:openapi-gen=true + // Package version supplies version information collected at build time to // kubernetes components. -// +k8s:openapi-gen=true package version // import "k8s.io/client-go/pkg/version" diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/OWNERS b/vendor/k8s.io/client-go/plugin/pkg/client/auth/OWNERS new file mode 100644 index 0000000000000..c607d2aa8c5fe --- /dev/null +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-authenticators-approvers +reviewers: +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD.bazel b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD.bazel index 357e41380f208..9c69745d5f4cc 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD.bazel +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD.bazel @@ -10,8 +10,8 @@ go_library( "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go index 60304b0f392fa..d42449fc2575a 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go @@ -17,6 +17,7 @@ limitations under the License. 
package azure import ( + "encoding/json" "errors" "fmt" "net/http" @@ -26,7 +27,7 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/net" restclient "k8s.io/client-go/rest" @@ -49,7 +50,7 @@ const ( func init() { if err := restclient.RegisterAuthProviderPlugin("azure", newAzureAuthProvider); err != nil { - glog.Fatalf("Failed to register azure auth plugin: %v", err) + klog.Fatalf("Failed to register azure auth plugin: %v", err) } } @@ -123,7 +124,7 @@ func (r *azureRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) token, err := r.tokenSource.Token() if err != nil { - glog.Errorf("Failed to acquire a token: %v", err) + klog.Errorf("Failed to acquire a token: %v", err) return nil, fmt.Errorf("acquiring a token for authorization header: %v", err) } @@ -243,9 +244,9 @@ func (ts *azureTokenSource) retrieveTokenFromCfg() (*azureToken, error) { token: adal.Token{ AccessToken: accessToken, RefreshToken: refreshToken, - ExpiresIn: expiresIn, - ExpiresOn: expiresOn, - NotBefore: expiresOn, + ExpiresIn: json.Number(expiresIn), + ExpiresOn: json.Number(expiresOn), + NotBefore: json.Number(expiresOn), Resource: fmt.Sprintf("spn:%s", apiserverID), Type: tokenType, }, @@ -262,8 +263,8 @@ func (ts *azureTokenSource) storeTokenInCfg(token *azureToken) error { newCfg[cfgClientID] = token.clientID newCfg[cfgTenantID] = token.tenantID newCfg[cfgApiserverID] = token.apiserverID - newCfg[cfgExpiresIn] = token.token.ExpiresIn - newCfg[cfgExpiresOn] = token.token.ExpiresOn + newCfg[cfgExpiresIn] = string(token.token.ExpiresIn) + newCfg[cfgExpiresOn] = string(token.token.ExpiresOn) err := ts.persister.Persist(newCfg) if err != nil { diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD.bazel b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD.bazel index 3f2e808dd47a1..8da0acbd1939c 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD.bazel +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD.bazel @@ -7,7 +7,6 @@ go_library( importpath = "k8s.io/client-go/plugin/pkg/client/auth/exec", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -20,5 +19,6 @@ go_library( "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//vendor/k8s.io/client-go/transport:go_default_library", "//vendor/k8s.io/client-go/util/connrotation:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index cae9d0d618e78..4d72526583ef6 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -31,7 +31,6 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/crypto/ssh/terminal" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -44,6 +43,7 @@ import ( "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/transport" "k8s.io/client-go/util/connrotation" + "k8s.io/klog" ) const execInfoEnv = "KUBERNETES_EXEC_INFO" @@ -228,7 +228,7 @@ func 
(r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { Code: int32(res.StatusCode), } if err := r.a.maybeRefreshCreds(creds, resp); err != nil { - glog.Errorf("refreshing credentials: %v", err) + klog.Errorf("refreshing credentials: %v", err) } } return res, nil diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD.bazel b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD.bazel index 503f57eac7ae3..ce7b09baf4ec2 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD.bazel +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD.bazel @@ -7,12 +7,12 @@ go_library( importpath = "k8s.io/client-go/plugin/pkg/client/auth/gcp", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/util/jsonpath:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go index 2bc6c4474bbaf..e44c2adabb3a2 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go @@ -27,18 +27,18 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/yaml" restclient "k8s.io/client-go/rest" "k8s.io/client-go/util/jsonpath" + "k8s.io/klog" ) func init() { if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil { - glog.Fatalf("Failed to register gcp auth plugin: %v", err) + klog.Fatalf("Failed to register gcp auth plugin: %v", err) } } @@ -223,7 +223,7 @@ func (t *cachedTokenSource) Token() (*oauth2.Token, error) { cache := t.update(tok) if t.persister != nil { if err := t.persister.Persist(cache); err != nil { - glog.V(4).Infof("Failed to persist token: %v", err) + klog.V(4).Infof("Failed to persist token: %v", err) } } return tok, nil @@ -329,7 +329,7 @@ func (c *commandTokenSource) parseTokenCmdOutput(output []byte) (*oauth2.Token, } var expiry time.Time if t, err := time.Parse(c.timeFmt, expiryStr); err != nil { - glog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err) + klog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err) } else { expiry = t } @@ -373,7 +373,7 @@ func (t *conditionalTransport) RoundTrip(req *http.Request) (*http.Response, err } if res.StatusCode == 401 { - glog.V(4).Infof("The credentials that were supplied are invalid for the target cluster") + klog.V(4).Infof("The credentials that were supplied are invalid for the target cluster") t.persister.Persist(t.resetCache) } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD.bazel b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD.bazel index cafb7c91307da..7b0242566b37c 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD.bazel +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD.bazel @@ -7,9 +7,9 @@ go_library( importpath = "k8s.io/client-go/plugin/pkg/client/auth/oidc", visibility = ["//visibility:public"], deps = [ - 
"//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go index 9c3ea0ab8d53f..1383a97c62eb1 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go @@ -28,10 +28,10 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/oauth2" "k8s.io/apimachinery/pkg/util/net" restclient "k8s.io/client-go/rest" + "k8s.io/klog" ) const ( @@ -49,7 +49,7 @@ const ( func init() { if err := restclient.RegisterAuthProviderPlugin("oidc", newOIDCAuthProvider); err != nil { - glog.Fatalf("Failed to register oidc auth plugin: %v", err) + klog.Fatalf("Failed to register oidc auth plugin: %v", err) } } @@ -124,7 +124,7 @@ func newOIDCAuthProvider(_ string, cfg map[string]string, persister restclient.A } if len(cfg[cfgExtraScopes]) > 0 { - glog.V(2).Infof("%s auth provider field depricated, refresh request don't send scopes", + klog.V(2).Infof("%s auth provider field depricated, refresh request don't send scopes", cfgExtraScopes) } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD.bazel b/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD.bazel index a044544f5715b..77e97aac19a69 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD.bazel +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD.bazel @@ -7,10 +7,10 @@ go_library( importpath = "k8s.io/client-go/plugin/pkg/client/auth/openstack", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go index e6d7f04934aa6..fab5104ef61cc 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go @@ -22,9 +22,9 @@ import ( "sync" "time" - "github.com/golang/glog" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/net" restclient "k8s.io/client-go/rest" @@ -32,7 +32,7 @@ import ( func init() { if err := restclient.RegisterAuthProviderPlugin("openstack", newOpenstackAuthProvider); err != nil { - glog.Fatalf("Failed to register openstack auth plugin: %s", err) + klog.Fatalf("Failed to register openstack auth plugin: %s", err) } } @@ -62,7 +62,7 @@ func (t *tokenGetter) Token() (string, error) { var err error if t.authOpt == nil { // reads the config from the environment - glog.V(4).Info("reading openstack config from the environment variables") + klog.V(4).Info("reading openstack config from the environment variables") options, err = openstack.AuthOptionsFromEnv() if err != nil { return "", fmt.Errorf("failed to read openstack 
env vars: %s", err) @@ -126,7 +126,7 @@ func (t *tokenRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) if err == nil { req.Header.Set("Authorization", "Bearer "+token) } else { - glog.V(4).Infof("failed to get token: %s", err) + klog.V(4).Infof("failed to get token: %s", err) } return t.RoundTripper.RoundTrip(req) @@ -140,7 +140,7 @@ func newOpenstackAuthProvider(_ string, config map[string]string, persister rest var ttlDuration time.Duration var err error - glog.Warningf("WARNING: in-tree openstack auth plugin is now deprecated. please use the \"client-keystone-auth\" kubectl/client-go credential plugin instead") + klog.Warningf("WARNING: in-tree openstack auth plugin is now deprecated. please use the \"client-keystone-auth\" kubectl/client-go credential plugin instead") ttl, found := config["ttl"] if !found { ttlDuration = DefaultTTLDuration diff --git a/vendor/k8s.io/client-go/rest/BUILD.bazel b/vendor/k8s.io/client-go/rest/BUILD.bazel index 2e07642f59f87..4c0b7ef30ccc5 100644 --- a/vendor/k8s.io/client-go/rest/BUILD.bazel +++ b/vendor/k8s.io/client-go/rest/BUILD.bazel @@ -7,7 +7,6 @@ go_library( "config.go", "plugin.go", "request.go", - "token_source.go", "transport.go", "url_utils.go", "urlbackoff.go", @@ -17,9 +16,7 @@ go_library( importpath = "k8s.io/client-go/rest", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", - "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -37,5 +34,6 @@ go_library( "//vendor/k8s.io/client-go/transport:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 87e87905523c2..072e7392b1911 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -29,7 +29,6 @@ import ( "strings" "time" - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -37,6 +36,7 @@ import ( clientcmdapi "k8s.io/client-go/tools/clientcmd/api" certutil "k8s.io/client-go/util/cert" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) const ( @@ -70,6 +70,11 @@ type Config struct { // TODO: demonstrate an OAuth2 compatible client. BearerToken string + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. + BearerTokenFile string + // Impersonate is the configuration that RESTClient will use for impersonation. 
Impersonate ImpersonationConfig @@ -322,16 +327,15 @@ func InClusterConfig() (*Config, error) { return nil, ErrNotInCluster } - ts := newCachedPathTokenSource(tokenFile) - - if _, err := ts.Token(); err != nil { + token, err := ioutil.ReadFile(tokenFile) + if err != nil { return nil, err } tlsClientConfig := TLSClientConfig{} if _, err := certutil.NewPool(rootCAFile); err != nil { - glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) } else { tlsClientConfig.CAFile = rootCAFile } @@ -340,7 +344,8 @@ func InClusterConfig() (*Config, error) { // TODO: switch to using cluster DNS. Host: "https://" + net.JoinHostPort(host, port), TLSClientConfig: tlsClientConfig, - WrapTransport: TokenSourceWrapTransport(ts), + BearerToken: string(token), + BearerTokenFile: tokenFile, }, nil } @@ -430,12 +435,13 @@ func AnonymousClientConfig(config *Config) *Config { // CopyConfig returns a copy of the given config func CopyConfig(config *Config) *Config { return &Config{ - Host: config.Host, - APIPath: config.APIPath, - ContentConfig: config.ContentConfig, - Username: config.Username, - Password: config.Password, - BearerToken: config.BearerToken, + Host: config.Host, + APIPath: config.APIPath, + ContentConfig: config.ContentConfig, + Username: config.Username, + Password: config.Password, + BearerToken: config.BearerToken, + BearerTokenFile: config.BearerTokenFile, Impersonate: ImpersonationConfig{ Groups: config.Impersonate.Groups, Extra: config.Impersonate.Extra, diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go index cf8fbabfdf1cf..83ef5ae320fea 100644 --- a/vendor/k8s.io/client-go/rest/plugin.go +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -21,7 +21,7 @@ import ( "net/http" "sync" - "github.com/golang/glog" + "k8s.io/klog" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) @@ -57,7 +57,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error { if _, found := plugins[name]; found { return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) } - glog.V(4).Infof("Registered Auth Provider Plugin %q", name) + klog.V(4).Infof("Registered Auth Provider Plugin %q", name) plugins[name] = plugin return nil } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 9bb311448ab41..64901fba20d88 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -32,7 +32,6 @@ import ( "strings" "time" - "github.com/golang/glog" "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,6 +43,7 @@ import ( restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) var ( @@ -114,7 +114,7 @@ type Request struct { // NewRequest creates a new request helper object for accessing runtime.Objects on a server. 
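The `rest.Config` hunks above replace the token-source wrapping in `InClusterConfig` with a simpler scheme: the service-account token is read once into `BearerToken`, and the new `BearerTokenFile` field records where it came from; `CopyConfig` now carries the field as well. A sketch that mirrors this behaviour when building a config by hand; the function name and parameters are illustrative:

```go
// Sketch mirroring the revised InClusterConfig: read the token once into
// BearerToken and record its path in BearerTokenFile for later re-reads.
package main

import (
	"io/ioutil"

	"k8s.io/client-go/kubernetes"
	rest "k8s.io/client-go/rest"
)

func newClient(apiServerURL, tokenFile, caFile string) (*kubernetes.Clientset, error) {
	token, err := ioutil.ReadFile(tokenFile)
	if err != nil {
		return nil, err
	}
	cfg := &rest.Config{
		Host:            apiServerURL,
		BearerToken:     string(token),
		BearerTokenFile: tokenFile,
		TLSClientConfig: rest.TLSClientConfig{CAFile: caFile},
	}
	return kubernetes.NewForConfig(cfg)
}
```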
func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request { if backoff == nil { - glog.V(2).Infof("Not implementing request backoff strategy.") + klog.V(2).Infof("Not implementing request backoff strategy.") backoff = &NoBackoff{} } @@ -527,7 +527,7 @@ func (r *Request) tryThrottle() { r.throttle.Accept() } if latency := time.Since(now); latency > longThrottleLatency { - glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) + klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } } @@ -683,7 +683,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { }() if r.err != nil { - glog.V(4).Infof("Error in request: %v", r.err) + klog.V(4).Infof("Error in request: %v", r.err) return r.err } @@ -770,13 +770,13 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { _, err := seeker.Seek(0, 0) if err != nil { - glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) fn(req, resp) return true } } - glog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) + klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) return false } @@ -844,13 +844,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu // 2. Apiserver sends back the headers and then part of the body // 3. Apiserver closes connection. // 4. client-go should catch this and return an error. - glog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) + klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err) return Result{ err: streamErr, } default: - glog.Errorf("Unexpected error when reading response body: %#v", err) + klog.Errorf("Unexpected error when reading response body: %#v", err) unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err) return Result{ err: unexpectedErr, @@ -914,11 +914,11 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu func truncateBody(body string) string { max := 0 switch { - case bool(glog.V(10)): + case bool(klog.V(10)): return body - case bool(glog.V(9)): + case bool(klog.V(9)): max = 10240 - case bool(glog.V(8)): + case bool(klog.V(8)): max = 1024 } @@ -933,13 +933,13 @@ func truncateBody(body string) string { // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. 
func glogBody(prefix string, body []byte) { - if glog.V(8) { + if klog.V(8) { if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - glog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) + klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) } else { - glog.Infof("%s: %s", prefix, truncateBody(string(body))) + klog.Infof("%s: %s", prefix, truncateBody(string(body))) } } } @@ -1141,7 +1141,7 @@ func (r Result) Error() error { // to be backwards compatible with old servers that do not return a version, default to "v1" out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) if err != nil { - glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) + klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) return r.err } switch t := out.(type) { diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go index eff848abc12b6..d00e42f86671f 100644 --- a/vendor/k8s.io/client-go/rest/urlbackoff.go +++ b/vendor/k8s.io/client-go/rest/urlbackoff.go @@ -20,9 +20,9 @@ import ( "net/url" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) // Set of resp. Codes that we backoff for. @@ -64,7 +64,7 @@ func (n *NoBackoff) Sleep(d time.Duration) { // Disable makes the backoff trivial, i.e., sets it to zero. This might be used // by tests which want to run 1000s of mock requests without slowing down. func (b *URLBackoff) Disable() { - glog.V(4).Infof("Disabling backoff strategy") + klog.V(4).Infof("Disabling backoff strategy") b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) } @@ -76,7 +76,7 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string { // in the future. host, err := url.Parse(rawurl.String()) if err != nil { - glog.V(4).Infof("Error extracting url: %v", rawurl) + klog.V(4).Infof("Error extracting url: %v", rawurl) panic("bad url!") } return host.Host @@ -89,7 +89,7 @@ func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode i b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now()) return } else if responseCode >= 300 || err != nil { - glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) + klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) } //If we got this far, there is no backoff required for this URL anymore. 
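Since this vendor bump swaps glog for klog throughout, here is a short sketch of how a binary wires up klog verbosity so the klog.V(...) call sites above produce output. It assumes the vendored klog exposes InitFlags, as released klog versions do.

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Register klog's flags (-v, -logtostderr, ...) on the default FlagSet.
	klog.InitFlags(nil)
	_ = flag.Set("v", "4")
	_ = flag.Set("logtostderr", "true")
	flag.Parse()
	defer klog.Flush()

	// Matches the verbosity used by the request/backoff call sites above.
	klog.V(4).Infof("throttling and backoff details show up at -v=4")
	klog.Warningf("warnings are emitted regardless of -v")
}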
diff --git a/vendor/k8s.io/client-go/restmapper/BUILD.bazel b/vendor/k8s.io/client-go/restmapper/BUILD.bazel index 1e26b7a1206af..182b13062db79 100644 --- a/vendor/k8s.io/client-go/restmapper/BUILD.bazel +++ b/vendor/k8s.io/client-go/restmapper/BUILD.bazel @@ -11,10 +11,10 @@ go_library( importpath = "k8s.io/client-go/restmapper", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go index aa158626af4fb..84491f4c5d16c 100644 --- a/vendor/k8s.io/client-go/restmapper/discovery.go +++ b/vendor/k8s.io/client-go/restmapper/discovery.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" - "github.com/golang/glog" + "k8s.io/klog" ) // APIGroupResources is an API group with a mapping of versions to @@ -212,7 +212,7 @@ func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { // Reset resets the internally cached Discovery information and will // cause the next mapping request to re-discover. func (d *DeferredDiscoveryRESTMapper) Reset() { - glog.V(5).Info("Invalidating discovery information") + klog.V(5).Info("Invalidating discovery information") d.initMu.Lock() defer d.initMu.Unlock() diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go index d9f4be0b6b113..6f3c9d9306917 100644 --- a/vendor/k8s.io/client-go/restmapper/shortcut.go +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -19,7 +19,7 @@ package restmapper import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -86,12 +86,12 @@ func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []re // This can return an error *and* the results it was able to find. We don't need to fail on the error. apiResList, err := e.discoveryClient.ServerResources() if err != nil { - glog.V(1).Infof("Error loading discovery information: %v", err) + klog.V(1).Infof("Error loading discovery information: %v", err) } for _, apiResources := range apiResList { gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) if err != nil { - glog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) + klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) continue } for _, apiRes := range apiResources.APIResources { diff --git a/vendor/k8s.io/client-go/scale/interfaces.go b/vendor/k8s.io/client-go/scale/interfaces.go index 4668c7417d183..13f2cfb8e786c 100644 --- a/vendor/k8s.io/client-go/scale/interfaces.go +++ b/vendor/k8s.io/client-go/scale/interfaces.go @@ -34,6 +34,6 @@ type ScaleInterface interface { // Get fetches the scale of the given scalable resource. Get(resource schema.GroupResource, name string) (*autoscalingapi.Scale, error) - // Update updates the scale of the the given scalable resource. + // Update updates the scale of the given scalable resource. 
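A sketch of how the restmapper package touched above is typically driven (illustration only; the kubeconfig path is a placeholder):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}
	groupResources, err := restmapper.GetAPIGroupResources(dc)
	if err != nil {
		panic(err)
	}
	mapper := restmapper.NewDiscoveryRESTMapper(groupResources)

	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
	if err != nil {
		panic(err)
	}
	// Prints the resolved resource, e.g. "apps/v1, Resource=deployments".
	fmt.Println(mapping.Resource)
}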
Update(resource schema.GroupResource, scale *autoscalingapi.Scale) (*autoscalingapi.Scale, error) } diff --git a/vendor/k8s.io/client-go/testing/BUILD.bazel b/vendor/k8s.io/client-go/testing/BUILD.bazel index 393d4e8f22232..2a45e25364321 100644 --- a/vendor/k8s.io/client-go/testing/BUILD.bazel +++ b/vendor/k8s.io/client-go/testing/BUILD.bazel @@ -11,6 +11,7 @@ go_library( importpath = "k8s.io/client-go/testing", visibility = ["//visibility:public"], deps = [ + "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -18,6 +19,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go index b99f231c8d7bf..e6db578ed8fe0 100644 --- a/vendor/k8s.io/client-go/testing/actions.go +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" ) func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { @@ -152,45 +153,49 @@ func NewUpdateAction(resource schema.GroupVersionResource, namespace string, obj return action } -func NewRootPatchAction(resource schema.GroupVersionResource, name string, patch []byte) PatchActionImpl { +func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Name = name + action.PatchType = pt action.Patch = patch return action } -func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, patch []byte) PatchActionImpl { +func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Namespace = namespace action.Name = name + action.PatchType = pt action.Patch = patch return action } -func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, patch []byte, subresources ...string) PatchActionImpl { +func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Subresource = path.Join(subresources...) 
action.Name = name + action.PatchType = pt action.Patch = patch return action } -func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, patch []byte, subresources ...string) PatchActionImpl { +func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Subresource = path.Join(subresources...) action.Namespace = namespace action.Name = name + action.PatchType = pt action.Patch = patch return action @@ -396,6 +401,7 @@ type DeleteCollectionAction interface { type PatchAction interface { Action GetName() string + GetPatchType() types.PatchType GetPatch() []byte } @@ -537,8 +543,9 @@ func (a UpdateActionImpl) DeepCopy() Action { type PatchActionImpl struct { ActionImpl - Name string - Patch []byte + Name string + PatchType types.PatchType + Patch []byte } func (a PatchActionImpl) GetName() string { @@ -549,12 +556,17 @@ func (a PatchActionImpl) GetPatch() []byte { return a.Patch } +func (a PatchActionImpl) GetPatchType() types.PatchType { + return a.PatchType +} + func (a PatchActionImpl) DeepCopy() Action { patch := make([]byte, len(a.Patch)) copy(patch, a.Patch) return PatchActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), Name: a.Name, + PatchType: a.PatchType, Patch: patch, } } diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go index 00c4c49fce454..90f16f560806b 100644 --- a/vendor/k8s.io/client-go/testing/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -20,11 +20,13 @@ import ( "fmt" "sync" + "github.com/evanphx/json-patch" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" @@ -137,15 +139,30 @@ func ObjectReaction(tracker ObjectTracker) ReactionFunc { if err != nil { return true, nil, err } - // Only supports strategic merge patch - // TODO: Add support for other Patch types - mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) - if err != nil { - return true, nil, err - } - - if err = json.Unmarshal(mergedByte, obj); err != nil { - return true, nil, err + // Only supports strategic merge patch and JSONPatch as coded. 
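The patch action helpers above now take an explicit types.PatchType. A sketch of constructing such an action in test code (the resource, namespace, and pod name are placeholders):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	clienttesting "k8s.io/client-go/testing"
)

func main() {
	podsGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}

	// The patch type is now an explicit argument rather than being assumed
	// to be a strategic merge patch.
	action := clienttesting.NewPatchAction(
		podsGVR, "default", "example-pod",
		types.StrategicMergePatchType,
		[]byte(`{"metadata":{"labels":{"env":"test"}}}`),
	)

	fmt.Println(action.GetPatchType()) // application/strategic-merge-patch+json
}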
+ switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return true, nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return true, nil, err + } + if err = json.Unmarshal(modified, obj); err != nil { + return true, nil, err + } + case types.StrategicMergePatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return true, nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return true, nil, err + } + default: + return true, nil, fmt.Errorf("PatchType is not supported") } if err = tracker.Update(gvr, obj, ns); err != nil { diff --git a/vendor/k8s.io/client-go/tools/auth/OWNERS b/vendor/k8s.io/client-go/tools/auth/OWNERS new file mode 100644 index 0000000000000..c607d2aa8c5fe --- /dev/null +++ b/vendor/k8s.io/client-go/tools/auth/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-authenticators-approvers +reviewers: +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/tools/cache/BUILD.bazel b/vendor/k8s.io/client-go/tools/cache/BUILD.bazel index 28326a311d3b1..8b3a43d89e2ff 100644 --- a/vendor/k8s.io/client-go/tools/cache/BUILD.bazel +++ b/vendor/k8s.io/client-go/tools/cache/BUILD.bazel @@ -27,7 +27,6 @@ go_library( importpath = "k8s.io/client-go/tools/cache", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -47,5 +46,6 @@ go_library( "//vendor/k8s.io/client-go/tools/pager:go_default_library", "//vendor/k8s.io/client-go/util/buffer:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 45c3b500d427e..f818a293a6943 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) // NewDeltaFIFO returns a Store which can be used process changes to items. @@ -320,17 +320,15 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err newDeltas := append(f.items[id], Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) - _, exists := f.items[id] if len(newDeltas) > 0 { - if !exists { + if _, exists := f.items[id]; !exists { f.queue = append(f.queue, id) } f.items[id] = newDeltas f.cond.Broadcast() - } else if exists { - // We need to remove this from our map (extra items - // in the queue are ignored if they are not in the - // map). + } else { + // We need to remove this from our map (extra items in the queue are + // ignored if they are not in the map). delete(f.items, id) } return nil @@ -348,9 +346,6 @@ func (f *DeltaFIFO) List() []interface{} { func (f *DeltaFIFO) listLocked() []interface{} { list := make([]interface{}, 0, len(f.items)) for _, item := range f.items { - // Copy item's slice so operations on this slice - // won't interfere with the object we return. 
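For the JSONPatchType branch added to ObjectReaction above, a standalone sketch of the vendored evanphx/json-patch flow it relies on (the sample documents are made up):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"metadata":{"name":"example-pod","labels":{"env":"dev"}}}`)
	patchJSON := []byte(`[{"op":"replace","path":"/metadata/labels/env","value":"test"}]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}
	modified, err := patch.Apply(original)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(modified))
}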
- item = copyDeltas(item) list = append(list, item.Newest().Object) } return list @@ -398,10 +393,7 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err func (f *DeltaFIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() - if f.closed { - return true - } - return false + return f.closed } // Pop blocks until an item is added to the queue, and then returns it. If @@ -432,10 +424,10 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { } id := f.queue[0] f.queue = f.queue[1:] - item, ok := f.items[id] if f.initialPopulationCount > 0 { f.initialPopulationCount-- } + item, ok := f.items[id] if !ok { // Item may have been deleted subsequently. continue @@ -506,10 +498,10 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { deletedObj, exists, err := f.knownObjects.GetByKey(k) if err != nil { deletedObj = nil - glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) + klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) } else if !exists { deletedObj = nil - glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) } queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { @@ -553,10 +545,10 @@ func (f *DeltaFIFO) syncKey(key string) error { func (f *DeltaFIFO) syncKeyLocked(key string) error { obj, exists, err := f.knownObjects.GetByKey(key) if err != nil { - glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) + klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) return nil } else if !exists { - glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) + klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) return nil } diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index fa88fc407e392..b38fe70b95669 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -20,8 +20,8 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/klog" ) // ExpirationCache implements the store interface @@ -95,7 +95,7 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { - glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) + klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) c.cacheStorage.Delete(key) return nil, false } @@ -179,7 +179,7 @@ func (c *ExpirationCache) Delete(obj interface{}) error { func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error { c.expirationLock.Lock() defer c.expirationLock.Unlock() - items := map[string]interface{}{} + items := make(map[string]interface{}, len(list)) ts := c.clock.Now() for _, item := range list { key, err := c.keyFunc(item) diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index e05c01ee2960d..508c5530c36fb 100644 --- 
a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -297,7 +297,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { // after calling this function. f's queue is reset, too; upon return, it // will contain the items in the map, in no particular order. func (f *FIFO) Replace(list []interface{}, resourceVersion string) error { - items := map[string]interface{}{} + items := make(map[string]interface{}, len(list)) for _, item := range list { key, err := f.keyFunc(item) if err != nil { diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go index 78e492455ea65..7357ff97a1f4c 100644 --- a/vendor/k8s.io/client-go/tools/cache/heap.go +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -204,7 +204,7 @@ func (h *Heap) AddIfNotPresent(obj interface{}) error { return nil } -// addIfNotPresentLocked assumes the lock is already held and adds the the provided +// addIfNotPresentLocked assumes the lock is already held and adds the provided // item to the queue if it does not already exist. func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) { if _, exists := h.data.items[key]; exists { diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index 27d51a6b38797..ce377329c7f1c 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -17,7 +17,7 @@ limitations under the License. package cache import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -60,7 +60,7 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) if err != nil { // Ignore error; do slow search without index. 
- glog.Warningf("can not retrieve list of objects using index : %v", err) + klog.Warningf("can not retrieve list of objects using index : %v", err) for _, m := range indexer.List() { metadata, err := meta.Accessor(m) if err != nil { diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go index cbb6434ebde2a..4c6686e918c14 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -156,7 +156,7 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er } elements, err := fn(updated) if err != nil { - glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) + klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) continue } for _, inIndex := range elements { diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go index e2aa448484087..adb5b8be8af46 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" @@ -45,7 +45,7 @@ func NewCacheMutationDetector(name string) CacheMutationDetector { if !mutationDetectionEnabled { return dummyMutationDetector{} } - glog.Warningln("Mutation detector is enabled, this will result in memory leakage.") + klog.Warningln("Mutation detector is enabled, this will result in memory leakage.") return &defaultCacheMutationDetector{name: name, period: 1 * time.Second} } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 9ee7efcbbd822..56d0f52330630 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -24,14 +24,11 @@ import ( "net" "net/url" "reflect" - "strconv" "strings" "sync" - "sync/atomic" "syscall" "time" - "github.com/golang/glog" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,6 +38,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog" ) // Reflector watches a specified resource and causes all changes to be reflected in the given store. @@ -95,17 +93,10 @@ func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyn return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) } -// reflectorDisambiguator is used to disambiguate started reflectors. -// initialized to an unstable value to ensure meaning isn't attributed to the suffix. 
-var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345) - // NewNamedReflector same as NewReflector, but with a specified name for logging func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1) r := &Reflector{ - name: name, - // we need this to be unique per process (some names are still the same) but obvious who it belongs to - metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), + name: name, listerWatcher: lw, store: store, expectedType: reflect.TypeOf(expectedType), @@ -128,7 +119,7 @@ var internalPackages = []string{"client-go/tools/cache/"} // Run starts a watch and handles watch events. Will restart the watch if it is closed. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) wait.Until(func() { if err := r.ListAndWatch(stopCh); err != nil { utilruntime.HandleError(err) @@ -166,20 +157,17 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) + klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) var resourceVersion string // Explicitly set "0" as resource version - it's fine for the List() // to be served from cache and potentially be delayed relative to // etcd contents. Reflector framework will catch up via Watch() eventually. 
options := metav1.ListOptions{ResourceVersion: "0"} - r.metrics.numberOfLists.Inc() - start := r.clock.Now() list, err := r.listerWatcher.List(options) if err != nil { return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) } - r.metrics.listDuration.Observe(time.Since(start).Seconds()) listMetaInterface, err := meta.ListAccessor(list) if err != nil { return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) @@ -189,7 +177,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err != nil { return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) } - r.metrics.numberOfItemsInList.Observe(float64(len(items))) if err := r.syncWith(items, resourceVersion); err != nil { return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) } @@ -212,7 +199,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { return } if r.ShouldResync == nil || r.ShouldResync() { - glog.V(4).Infof("%s: forcing resync", r.name) + klog.V(4).Infof("%s: forcing resync", r.name) if err := r.store.Resync(); err != nil { resyncerrc <- err return @@ -239,14 +226,13 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { TimeoutSeconds: &timeoutSeconds, } - r.metrics.numberOfWatches.Inc() w, err := r.listerWatcher.Watch(options) if err != nil { switch err { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) default: utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err)) } @@ -267,7 +253,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { if err != errorStopRequested { - glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) } return nil } @@ -291,11 +277,6 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, err // Stopping the watcher should be idempotent and if we return from this function there's no way // we're coming back in with the same watch interface. 
defer w.Stop() - // update metrics - defer func() { - r.metrics.numberOfItemsInWatch.Observe(float64(eventCount)) - r.metrics.watchDuration.Observe(time.Since(start).Seconds()) - }() loop: for { @@ -351,10 +332,9 @@ loop: watchDuration := r.clock.Now().Sub(start) if watchDuration < 1*time.Second && eventCount == 0 { - r.metrics.numberOfShortWatches.Inc() return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } - glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) + klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) return nil } @@ -370,9 +350,4 @@ func (r *Reflector) setLastSyncResourceVersion(v string) { r.lastSyncResourceVersionMutex.Lock() defer r.lastSyncResourceVersionMutex.Unlock() r.lastSyncResourceVersion = v - - rv, err := strconv.Atoi(v) - if err == nil { - r.metrics.lastResourceVersion.Set(float64(rv)) - } } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 5f8c507f9e9d0..e91fc9e95583a 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -28,7 +28,7 @@ import ( "k8s.io/client-go/util/buffer" "k8s.io/client-go/util/retry" - "github.com/golang/glog" + "k8s.io/klog" ) // SharedInformer has a shared data cache and is capable of distributing notifications for changes @@ -86,7 +86,7 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve resyncCheckPeriod: defaultEventHandlerResyncPeriod, defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)), - clock: realClock, + clock: realClock, } return sharedIndexInformer } @@ -116,11 +116,11 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool }, stopCh) if err != nil { - glog.V(2).Infof("stop requested") + klog.V(2).Infof("stop requested") return false } - glog.V(4).Infof("caches populated") + klog.V(4).Infof("caches populated") return true } @@ -279,11 +279,11 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration { return desired } if check == 0 { - glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) return 0 } if desired < check { - glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) return check } return desired @@ -296,19 +296,19 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv defer s.startedLock.Unlock() if s.stopped { - glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) + klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) return } if resyncPeriod > 0 { if resyncPeriod < minimumResyncPeriod { - glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + klog.Warningf("resyncPeriod %d is too small. 
Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) resyncPeriod = minimumResyncPeriod } if resyncPeriod < s.resyncCheckPeriod { if s.started { - glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) resyncPeriod = s.resyncCheckPeriod } else { // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go index 4958987f0e73e..fc844efe64d61 100755 --- a/vendor/k8s.io/client-go/tools/cache/store.go +++ b/vendor/k8s.io/client-go/tools/cache/store.go @@ -210,7 +210,7 @@ func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) // 'c' takes ownership of the list, you should not reference the list again // after calling this function. func (c *cache) Replace(list []interface{}, resourceVersion string) error { - items := map[string]interface{}{} + items := make(map[string]interface{}, len(list)) for _, item := range list { key, err := c.keyFunc(item) if err != nil { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel b/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel index dd7ed5c7012ba..27b1389a00cac 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel +++ b/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel @@ -18,7 +18,6 @@ go_library( importpath = "k8s.io/client-go/tools/clientcmd", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/imdario/mergo:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", @@ -31,5 +30,6 @@ go_library( "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api/latest:go_default_library", "//vendor/k8s.io/client-go/util/homedir:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 0a081871ac824..5871575a66932 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -15,4 +15,5 @@ limitations under the License. */ // +k8s:deepcopy-gen=package + package api diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go index 9750cf73acca6..cbf29ccf24daf 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -15,4 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package + package v1 diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index b8927f71087e5..a7b8c1c6e42b1 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -24,8 +24,8 @@ import ( "os" "strings" - "github.com/golang/glog" "github.com/imdario/mergo" + "k8s.io/klog" restclient "k8s.io/client-go/rest" clientauth "k8s.io/client-go/tools/auth" @@ -234,6 +234,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI return nil, err } mergedConfig.BearerToken = string(tokenBytes) + mergedConfig.BearerTokenFile = configAuthInfo.TokenFile } if len(configAuthInfo.Impersonate) > 0 { mergedConfig.Impersonate = restclient.ImpersonationConfig{ @@ -545,12 +546,12 @@ func (config *inClusterClientConfig) Possible() bool { // to the default config. func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) { if kubeconfigPath == "" && masterUrl == "" { - glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") + klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") kubeconfig, err := restclient.InClusterConfig() if err == nil { return kubeconfig, nil } - glog.Warning("error creating inClusterConfig, falling back to default config: ", err) + klog.Warning("error creating inClusterConfig, falling back to default config: ", err) } return NewNonInteractiveDeferredLoadingClientConfig( &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go index 9495849b09284..b8cc39688219f 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -24,7 +24,7 @@ import ( "reflect" "sort" - "github.com/golang/glog" + "k8s.io/klog" restclient "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -483,7 +483,7 @@ func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { config, err := getConfigFromFile(filename) if err != nil { - glog.FatalDepth(1, err) + klog.FatalDepth(1, err) } return config diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go index 6038c8d457a19..7e928a918563e 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -27,8 +27,8 @@ import ( goruntime "runtime" "strings" - "github.com/golang/glog" "github.com/imdario/mergo" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -356,7 +356,7 @@ func LoadFromFile(filename string) (*clientcmdapi.Config, error) { if err != nil { return nil, err } - glog.V(6).Infoln("Config loaded from file", filename) + klog.V(6).Infoln("Config loaded from file", filename) // set LocationOfOrigin on every Cluster, User, and Context for key, obj := range config.AuthInfos { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 05038133b6b84..76380db82ab95 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ 
b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -20,7 +20,7 @@ import ( "io" "sync" - "github.com/golang/glog" + "k8s.io/klog" restclient "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -119,7 +119,7 @@ func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, e // check for in-cluster configuration and use it if config.icc.Possible() { - glog.V(4).Infof("Using in-cluster configuration") + klog.V(4).Infof("Using in-cluster configuration") return config.icc.ClientConfig() } @@ -156,7 +156,7 @@ func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { } } - glog.V(4).Infof("Using in-cluster namespace") + klog.V(4).Infof("Using in-cluster namespace") // allow the namespace from the service account token directory to be used. return config.icc.Namespace() diff --git a/vendor/k8s.io/client-go/tools/portforward/portforward.go b/vendor/k8s.io/client-go/tools/portforward/portforward.go index 9d7936e7c5fae..0e9b369a98317 100644 --- a/vendor/k8s.io/client-go/tools/portforward/portforward.go +++ b/vendor/k8s.io/client-go/tools/portforward/portforward.go @@ -39,8 +39,9 @@ const PortForwardProtocolV1Name = "portforward.k8s.io" // PortForwarder knows how to listen for local connections and forward them to // a remote pod via an upgraded HTTP request. type PortForwarder struct { - ports []ForwardedPort - stopChan <-chan struct{} + addresses []listenAddress + ports []ForwardedPort + stopChan <-chan struct{} dialer httpstream.Dialer streamConn httpstream.Connection @@ -110,8 +111,52 @@ func parsePorts(ports []string) ([]ForwardedPort, error) { return forwards, nil } -// New creates a new PortForwarder. +type listenAddress struct { + address string + protocol string + failureMode string +} + +func parseAddresses(addressesToParse []string) ([]listenAddress, error) { + var addresses []listenAddress + parsed := make(map[string]listenAddress) + for _, address := range addressesToParse { + if address == "localhost" { + ip := listenAddress{address: "127.0.0.1", protocol: "tcp4", failureMode: "all"} + parsed[ip.address] = ip + ip = listenAddress{address: "::1", protocol: "tcp6", failureMode: "all"} + parsed[ip.address] = ip + } else if net.ParseIP(address).To4() != nil { + parsed[address] = listenAddress{address: address, protocol: "tcp4", failureMode: "any"} + } else if net.ParseIP(address) != nil { + parsed[address] = listenAddress{address: address, protocol: "tcp6", failureMode: "any"} + } else { + return nil, fmt.Errorf("%s is not a valid IP", address) + } + } + addresses = make([]listenAddress, len(parsed)) + id := 0 + for _, v := range parsed { + addresses[id] = v + id++ + } + return addresses, nil +} + +// New creates a new PortForwarder with localhost listen addresses. func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) { + return NewOnAddresses(dialer, []string{"localhost"}, ports, stopChan, readyChan, out, errOut) +} + +// NewOnAddresses creates a new PortForwarder with custom listen addresses. 
+func NewOnAddresses(dialer httpstream.Dialer, addresses []string, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) { + if len(addresses) == 0 { + return nil, errors.New("You must specify at least 1 address") + } + parsedAddresses, err := parseAddresses(addresses) + if err != nil { + return nil, err + } if len(ports) == 0 { return nil, errors.New("You must specify at least 1 port") } @@ -120,12 +165,13 @@ func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, rea return nil, err } return &PortForwarder{ - dialer: dialer, - ports: parsedPorts, - stopChan: stopChan, - Ready: readyChan, - out: out, - errOut: errOut, + dialer: dialer, + addresses: parsedAddresses, + ports: parsedPorts, + stopChan: stopChan, + Ready: readyChan, + out: out, + errOut: errOut, }, nil } @@ -181,13 +227,26 @@ func (pf *PortForwarder) forward() error { return nil } -// listenOnPort delegates tcp4 and tcp6 listener creation and waits for connections on both of these addresses. -// If both listener creation fail, an error is raised. +// listenOnPort delegates listener creation and waits for connections on requested bind addresses. +// An error is raised based on address groups (default and localhost) and their failure modes func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error { - errTcp4 := pf.listenOnPortAndAddress(port, "tcp4", "127.0.0.1") - errTcp6 := pf.listenOnPortAndAddress(port, "tcp6", "::1") - if errTcp4 != nil && errTcp6 != nil { - return fmt.Errorf("All listeners failed to create with the following errors: %s, %s", errTcp4, errTcp6) + var errors []error + failCounters := make(map[string]int, 2) + successCounters := make(map[string]int, 2) + for _, addr := range pf.addresses { + err := pf.listenOnPortAndAddress(port, addr.protocol, addr.address) + if err != nil { + errors = append(errors, err) + failCounters[addr.failureMode]++ + } else { + successCounters[addr.failureMode]++ + } + } + if successCounters["all"] == 0 && failCounters["all"] > 0 { + return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors) + } + if failCounters["any"] > 0 { + return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors) } return nil } @@ -216,6 +275,7 @@ func (pf *PortForwarder) getListener(protocol string, hostname string, port *For localPortUInt, err := strconv.ParseUint(localPort, 10, 16) if err != nil { + fmt.Fprintf(pf.out, "Failed to forward from %s:%d -> %d\n", hostname, localPortUInt, port.Remote) return nil, fmt.Errorf("Error parsing local port: %s from %s (%s)", err, listenerAddress, host) } port.Local = uint16(localPortUInt) @@ -340,3 +400,20 @@ func (pf *PortForwarder) Close() { } } } + +// GetPorts will return the ports that were forwarded; this can be used to +// retrieve the locally-bound port in cases where the input was port 0. This +// function will signal an error if the Ready channel is nil or if the +// listeners are not ready yet; this function will succeed after the Ready +// channel has been closed. 
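A sketch of driving the new NewOnAddresses and GetPorts APIs end to end (the pod path, namespace, and ports are placeholders; error handling is trimmed to panics):

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"os"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/portforward"
	"k8s.io/client-go/transport/spdy"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	roundTripper, upgrader, err := spdy.RoundTripperFor(cfg)
	if err != nil {
		panic(err)
	}

	// Placeholder pod; the portforward subresource URL is built from the config host.
	target, err := url.Parse(cfg.Host + "/api/v1/namespaces/default/pods/example-pod/portforward")
	if err != nil {
		panic(err)
	}
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, "POST", target)

	stopCh := make(chan struct{})
	readyCh := make(chan struct{})

	// "localhost" expands to both loopback addresses; local port 0 requests a random port.
	fw, err := portforward.NewOnAddresses(dialer, []string{"localhost"}, []string{"0:8080"}, stopCh, readyCh, os.Stdout, os.Stderr)
	if err != nil {
		panic(err)
	}

	go func() {
		<-readyCh
		// GetPorts succeeds once the Ready channel has been closed.
		if ports, err := fw.GetPorts(); err == nil {
			fmt.Printf("forwarding local %d -> pod %d\n", ports[0].Local, ports[0].Remote)
		}
	}()

	if err := fw.ForwardPorts(); err != nil {
		panic(err)
	}
}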
+func (pf *PortForwarder) GetPorts() ([]ForwardedPort, error) { + if pf.Ready == nil { + return nil, fmt.Errorf("no Ready channel provided") + } + select { + case <-pf.Ready: + return pf.ports, nil + default: + return nil, fmt.Errorf("listeners not ready") + } +} diff --git a/vendor/k8s.io/client-go/tools/record/BUILD.bazel b/vendor/k8s.io/client-go/tools/record/BUILD.bazel index 6c8508e3655eb..37413b187435c 100644 --- a/vendor/k8s.io/client-go/tools/record/BUILD.bazel +++ b/vendor/k8s.io/client-go/tools/record/BUILD.bazel @@ -12,7 +12,6 @@ go_library( importpath = "k8s.io/client-go/tools/record", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -26,5 +25,6 @@ go_library( "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/reference:go_default_library", "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 168dfa80c56ab..2ee69589c6d25 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -33,7 +33,7 @@ import ( "net/http" - "github.com/golang/glog" + "k8s.io/klog" ) const maxTriesPerEvent = 12 @@ -144,7 +144,7 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela } tries++ if tries >= maxTriesPerEvent { - glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) break } // Randomize the first sleep so that various clients won't all be @@ -194,13 +194,13 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv switch err.(type) { case *restclient.RequestConstructionError: // We will construct the request the same next time, so don't keep trying. - glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) return true case *errors.StatusError: if errors.IsAlreadyExists(err) { - glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) } else { - glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) } return true case *errors.UnexpectedObjectError: @@ -209,7 +209,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv default: // This case includes actual http transport errors. Go ahead and retry. } - glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) return false } @@ -256,12 +256,12 @@ type recorderImpl struct { func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { ref, err := ref.GetReference(recorder.scheme, object) if err != nil { - glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) return } if !validateEventType(eventtype) { - glog.Errorf("Unsupported event type: '%v'", eventtype) + klog.Errorf("Unsupported event type: '%v'", eventtype) return } diff --git a/vendor/k8s.io/client-go/tools/remotecommand/BUILD.bazel b/vendor/k8s.io/client-go/tools/remotecommand/BUILD.bazel index c2c9fe65bef58..a3359c21c9161 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/BUILD.bazel +++ b/vendor/k8s.io/client-go/tools/remotecommand/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "doc.go", "errorstream.go", + "reader.go", "remotecommand.go", "resize.go", "v1.go", @@ -16,7 +17,6 @@ go_library( importpath = "k8s.io/client-go/tools/remotecommand", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", @@ -25,5 +25,6 @@ go_library( "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/transport/spdy:go_default_library", "//vendor/k8s.io/client-go/util/exec:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/tools/remotecommand/reader.go b/vendor/k8s.io/client-go/tools/remotecommand/reader.go new file mode 100644 index 0000000000000..d1f1be34c9e94 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/reader.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "io" +) + +// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented, +// to keep io.Copy from doing things we don't want when copying from the reader to the data stream. +// +// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]), +// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call. +// That results in an oversized call to spdystream.Stream#Write [3], +// which results in a single oversized data frame[4] that is too large. 
+// +// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo +// [2] https://golang.org/pkg/io/#Copy +// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73 +// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304 +type readerWrapper struct { + reader io.Reader +} + +func (r readerWrapper) Read(p []byte) (int, error) { + return r.reader.Read(p) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go index d2b29861e6eeb..892d8d105dc31 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -22,7 +22,7 @@ import ( "net/http" "net/url" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/remotecommand" @@ -132,7 +132,7 @@ func (e *streamExecutor) Stream(options StreamOptions) error { case remotecommand.StreamProtocolV2Name: streamer = newStreamProtocolV2(options) case "": - glog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) + klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) fallthrough case remotecommand.StreamProtocolV1Name: streamer = newStreamProtocolV1(options) diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v1.go b/vendor/k8s.io/client-go/tools/remotecommand/v1.go index 92dad727f301f..4120f1f5f3dd2 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/v1.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/v1.go @@ -22,9 +22,9 @@ import ( "io/ioutil" "net/http" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog" ) // streamProtocolV1 implements the first version of the streaming exec & attach @@ -53,10 +53,10 @@ func (p *streamProtocolV1) stream(conn streamCreator) error { errorChan := make(chan error) cp := func(s string, dst io.Writer, src io.Reader) { - glog.V(6).Infof("Copying %s", s) - defer glog.V(6).Infof("Done copying %s", s) + klog.V(6).Infof("Copying %s", s) + defer klog.V(6).Infof("Done copying %s", s) if _, err := io.Copy(dst, src); err != nil && err != io.EOF { - glog.Errorf("Error copying %s: %v", s, err) + klog.Errorf("Error copying %s: %v", s, err) } if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr { doneChan <- struct{}{} @@ -127,7 +127,7 @@ func (p *streamProtocolV1) stream(conn streamCreator) error { // because stdin is not closed until the process exits. If we try to call // stdin.Close(), it returns no error but doesn't unblock the copy. It will // exit when the process exits, instead. - go cp(v1.StreamTypeStdin, p.remoteStdin, p.Stdin) + go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin}) } waitCount := 0 diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v2.go b/vendor/k8s.io/client-go/tools/remotecommand/v2.go index b74ae8de2208c..4b0001502a17c 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/v2.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/v2.go @@ -101,7 +101,7 @@ func (p *streamProtocolV2) copyStdin() { // the executed command will remain running. 
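The readerWrapper above exists to hide bytes.Buffer's WriteTo fast path from io.Copy. A standalone sketch of the same technique and its effect on write chunking:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// plainReader exposes only Read, hiding any WriteTo fast path of the wrapped
// reader so io.Copy falls back to its fixed-size copy buffer -- the same idea
// as the readerWrapper added above.
type plainReader struct{ r io.Reader }

func (p plainReader) Read(b []byte) (int, error) { return p.r.Read(b) }

// chunkRecorder counts how many Write calls it receives.
type chunkRecorder struct{ writes int }

func (c *chunkRecorder) Write(b []byte) (int, error) {
	c.writes++
	return len(b), nil
}

func main() {
	payload := strings.Repeat("x", 100000)

	// bytes.Buffer implements WriteTo, so io.Copy delivers everything in one Write.
	direct := &chunkRecorder{}
	_, _ = io.Copy(direct, bytes.NewBufferString(payload))

	// Wrapping hides WriteTo; io.Copy now writes in ~32 KiB chunks.
	wrapped := &chunkRecorder{}
	_, _ = io.Copy(wrapped, plainReader{bytes.NewBufferString(payload)})

	fmt.Printf("direct: %d write(s), wrapped: %d writes\n", direct.writes, wrapped.writes)
}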
defer once.Do(func() { p.remoteStdin.Close() }) - if _, err := io.Copy(p.remoteStdin, p.Stdin); err != nil { + if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil { runtime.HandleError(err) } }() diff --git a/vendor/k8s.io/client-go/tools/watch/BUILD.bazel b/vendor/k8s.io/client-go/tools/watch/BUILD.bazel index 03da776d5acbf..5e7ebf4e1c052 100644 --- a/vendor/k8s.io/client-go/tools/watch/BUILD.bazel +++ b/vendor/k8s.io/client-go/tools/watch/BUILD.bazel @@ -10,12 +10,12 @@ go_library( importpath = "k8s.io/client-go/tools/watch", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/tools/watch/until.go b/vendor/k8s.io/client-go/tools/watch/until.go index 9335788439767..aa4bbc21169fa 100644 --- a/vendor/k8s.io/client-go/tools/watch/until.go +++ b/vendor/k8s.io/client-go/tools/watch/until.go @@ -22,13 +22,13 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/cache" + "k8s.io/klog" ) // PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet, @@ -135,7 +135,7 @@ func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime. func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { if timeout < 0 { // This should be handled in validation - glog.Errorf("Timeout for context shall not be negative!") + klog.Errorf("Timeout for context shall not be negative!") timeout = 0 } diff --git a/vendor/k8s.io/client-go/transport/BUILD.bazel b/vendor/k8s.io/client-go/transport/BUILD.bazel index e1570fb21462f..0028c4373f66b 100644 --- a/vendor/k8s.io/client-go/transport/BUILD.bazel +++ b/vendor/k8s.io/client-go/transport/BUILD.bazel @@ -6,13 +6,15 @@ go_library( "cache.go", "config.go", "round_trippers.go", + "token_source.go", "transport.go", ], importmap = "k8s.io/kops/vendor/k8s.io/client-go/transport", importpath = "k8s.io/client-go/transport", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 4081c23e7ff08..acb126d8b09cb 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -39,6 +39,11 @@ type Config struct { // Bearer token for authentication BearerToken string + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. 
+ BearerTokenFile string + // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig @@ -80,7 +85,7 @@ func (c *Config) HasBasicAuth() bool { // HasTokenAuth returns whether the configuration has token authentication or not. func (c *Config) HasTokenAuth() bool { - return len(c.BearerToken) != 0 + return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0 } // HasCertAuth returns whether the configuration has certificate authentication or not. diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 0ebcbbc803736..117a9c8c4de41 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -22,7 +22,8 @@ import ( "strings" "time" - "github.com/golang/glog" + "golang.org/x/oauth2" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" ) @@ -44,7 +45,11 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip case config.HasBasicAuth() && config.HasTokenAuth(): return nil, fmt.Errorf("username/password or bearer token may be set, but not both") case config.HasTokenAuth(): - rt = NewBearerAuthRoundTripper(config.BearerToken, rt) + var err error + rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt) + if err != nil { + return nil, err + } case config.HasBasicAuth(): rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) } @@ -62,13 +67,13 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // DebugWrappers wraps a round tripper and logs based on the current log level. func DebugWrappers(rt http.RoundTripper) http.RoundTripper { switch { - case bool(glog.V(9)): + case bool(klog.V(9)): rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) - case bool(glog.V(8)): + case bool(klog.V(8)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) - case bool(glog.V(7)): + case bool(klog.V(7)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) - case bool(glog.V(6)): + case bool(klog.V(6)): rt = newDebuggingRoundTripper(rt, debugURLTiming) } @@ -138,7 +143,7 @@ func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -166,7 +171,7 @@ func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -197,7 +202,7 @@ func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -257,7 +262,7 @@ func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.delegate.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.delegate) + klog.Errorf("CancelRequest not implemented by %T", rt.delegate) } } @@ -265,13 +270,35 @@ func (rt 
*impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { r type bearerAuthRoundTripper struct { bearer string + source oauth2.TokenSource rt http.RoundTripper } // NewBearerAuthRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { - return &bearerAuthRoundTripper{bearer, rt} + return &bearerAuthRoundTripper{bearer, nil, rt} +} + +// NewBearerAuthWithRefreshRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +// If tokenFile is non-empty, it is periodically read, +// and the last successfully read content is used as the bearer token. +// If tokenFile is non-empty and bearer is empty, the tokenFile is read +// immediately to populate the initial bearer token. +func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { + if len(tokenFile) == 0 { + return &bearerAuthRoundTripper{bearer, nil, rt}, nil + } + source := NewCachedFileTokenSource(tokenFile) + if len(bearer) == 0 { + token, err := source.Token() + if err != nil { + return nil, err + } + bearer = token.AccessToken + } + return &bearerAuthRoundTripper{bearer, source, rt}, nil } func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -280,7 +307,13 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, } req = utilnet.CloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) + token := rt.bearer + if rt.source != nil { + if refreshedToken, err := rt.source.Token(); err == nil { + token = refreshedToken.AccessToken + } + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return rt.rt.RoundTrip(req) } @@ -288,7 +321,7 @@ func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -372,7 +405,7 @@ func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) + klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) } } @@ -380,17 +413,17 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo := newRequestInfo(req) if rt.levels[debugJustURL] { - glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) } if rt.levels[debugCurlCommand] { - glog.Infof("%s", reqInfo.toCurl()) + klog.Infof("%s", reqInfo.toCurl()) } if rt.levels[debugRequestHeaders] { - glog.Infof("Request Headers:") + klog.Infof("Request Headers:") for key, values := range reqInfo.RequestHeaders { for _, value := range values { - glog.Infof(" %s: %s", key, value) + klog.Infof(" %s: %s", key, value) } } } @@ -402,16 +435,16 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo.complete(response, err) if rt.levels[debugURLTiming] { - glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + klog.Infof("%s %s %s in %d 
milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseStatus] { - glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseHeaders] { - glog.Infof("Response Headers:") + klog.Infof("Response Headers:") for key, values := range reqInfo.ResponseHeaders { for _, value := range values { - glog.Infof(" %s: %s", key, value) + klog.Infof(" %s: %s", key, value) } } } diff --git a/vendor/k8s.io/client-go/rest/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go similarity index 79% rename from vendor/k8s.io/client-go/rest/token_source.go rename to vendor/k8s.io/client-go/transport/token_source.go index 296b2a0481dfa..8595df2716945 100644 --- a/vendor/k8s.io/client-go/rest/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rest +package transport import ( "fmt" @@ -24,8 +24,8 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/oauth2" + "k8s.io/klog" ) // TokenSourceWrapTransport returns a WrapTransport that injects bearer tokens @@ -42,17 +42,19 @@ func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) htt } } -func newCachedPathTokenSource(path string) oauth2.TokenSource { +// NewCachedFileTokenSource returns an oauth2.TokenSource that reads a token from a +// file at a specified path and periodically reloads it. +func NewCachedFileTokenSource(path string) oauth2.TokenSource { return &cachingTokenSource{ now: time.Now, - leeway: 1 * time.Minute, + leeway: 10 * time.Second, base: &fileTokenSource{ path: path, - // This period was picked because it is half of the minimum validity - // duration for a token provisioned by they TokenRequest API. This is - // unsophisticated and should induce rotation at a frequency that should - // work with the token volume source. - period: 5 * time.Minute, + // This period was picked because it is half of the duration between when the kubelet + // refreshes a projected service account token and when the original token expires. + // Default token lifetime is 10 minutes, and the kubelet starts refreshing at 80% of lifetime. + // This should induce re-reading at a frequency that works with the token volume source. 
+ period: time.Minute, }, } } @@ -129,7 +131,7 @@ func (ts *cachingTokenSource) Token() (*oauth2.Token, error) { if ts.tok == nil { return nil, err } - glog.Errorf("Unable to rotate token: %v", err) + klog.Errorf("Unable to rotate token: %v", err) return ts.tok, nil } diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS new file mode 100644 index 0000000000000..470b7a1c92d15 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index fe2158b238de4..3429c82cdff90 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -18,6 +18,7 @@ package cert import ( "bytes" + "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -64,7 +65,7 @@ func NewPrivateKey() (*rsa.PrivateKey, error) { } // NewSelfSignedCACert creates a CA certificate -func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, error) { +func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) { now := time.Now() tmpl := x509.Certificate{ SerialNumber: new(big.Int).SetInt64(0), @@ -76,7 +77,7 @@ func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, er NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) @@ -87,7 +88,7 @@ func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, er } // NewSignedCert creates a signed certificate using the given CA certificate and key -func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, error) { +func NewSignedCert(cfg Config, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer) (*x509.Certificate, error) { serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64)) if err != nil { return nil, err @@ -187,7 +188,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey) @@ -259,34 +260,6 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a return certBuffer.Bytes(), keyBuffer.Bytes(), nil } -// FormatBytesCert receives byte array certificate and formats in human-readable format -func FormatBytesCert(cert []byte) (string, error) { - block, _ := pem.Decode(cert) - c, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return "", fmt.Errorf("failed to parse certificate [%v]", err) - } - return FormatCert(c), nil -} - -// FormatCert receives certificate and formats in human-readable format -func FormatCert(c *x509.Certificate) string { - var ips []string - for _, ip := range c.IPAddresses { - ips = append(ips, ip.String()) - } - altNames := append(ips, c.DNSNames...) 
- res := fmt.Sprintf( - "Issuer: CN=%s | Subject: CN=%s | CA: %t\n", - c.Issuer.CommonName, c.Subject.CommonName, c.IsCA, - ) - res += fmt.Sprintf("Not before: %s Not After: %s", c.NotBefore, c.NotAfter) - if len(altNames) > 0 { - res += fmt.Sprintf("\nAlternate Names: %v", altNames) - } - return res -} - func ipsToStrings(ips []net.IP) []string { ss := make([]string, 0, len(ips)) for _, ip := range ips { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD.bazel b/vendor/k8s.io/cloud-provider/BUILD.bazel similarity index 60% rename from vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD.bazel rename to vendor/k8s.io/cloud-provider/BUILD.bazel index 90486b9931b1c..11dd0ff87c729 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD.bazel +++ b/vendor/k8s.io/cloud-provider/BUILD.bazel @@ -7,14 +7,15 @@ go_library( "doc.go", "plugins.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/cloudprovider", - importpath = "k8s.io/kubernetes/pkg/cloudprovider", + importmap = "k8s.io/kops/vendor/k8s.io/cloud-provider", + importpath = "k8s.io/cloud-provider", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/controller:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/cloud-provider/CONTRIBUTING.md b/vendor/k8s.io/cloud-provider/CONTRIBUTING.md new file mode 100644 index 0000000000000..fe4643fdb942e --- /dev/null +++ b/vendor/k8s.io/cloud-provider/CONTRIBUTING.md @@ -0,0 +1,9 @@ +# Contributing guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. Changes to this repo should be discussed with [sig cloud-provider](https://github.com/kubernetes/community/tree/master/sig-cloud-provider). + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/cloud-provider](https://git.k8s.io/kubernetes/staging/src/k8s.io/cloud-provider) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/k8s.io/cloud-provider/LICENSE similarity index 94% rename from vendor/github.com/opencontainers/image-spec/LICENSE rename to vendor/k8s.io/cloud-provider/LICENSE index 9fdc20fdb6a80..8dada3edaf50d 100644 --- a/vendor/github.com/opencontainers/image-spec/LICENSE +++ b/vendor/k8s.io/cloud-provider/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -176,7 +175,18 @@ END OF TERMS AND CONDITIONS - Copyright 2016 The Linux Foundation. 
+ APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS b/vendor/k8s.io/cloud-provider/OWNERS similarity index 83% rename from vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS rename to vendor/k8s.io/cloud-provider/OWNERS index 314eb3d97ae50..8ce19864150e3 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS +++ b/vendor/k8s.io/cloud-provider/OWNERS @@ -1,22 +1,19 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - mikedanese - dims - wlan0 - andrewsykim +- cheftako reviewers: -- thockin -- lavalamp -- smarterclayton - wojtek-t - deads2k -- brendandburns - derekwaynecarr -- caesarxuchao - vishh - mikedanese - liggitt - gmarek -- erictune - davidopp - pmorie - sttts @@ -43,5 +40,7 @@ reviewers: - wlan0 - cheftako - andrewsykim +- mcrute labels: - sig/cloud-provider +- area/cloudprovider diff --git a/vendor/k8s.io/cloud-provider/README.md b/vendor/k8s.io/cloud-provider/README.md new file mode 100644 index 0000000000000..d53fec5a4d5cc --- /dev/null +++ b/vendor/k8s.io/cloud-provider/README.md @@ -0,0 +1,32 @@ +# cloud-provider + +This repository defines the cloud-provider interface and mechanism to initialize +a cloud-provider implementation into Kubernetes. Currently multiple processes +use this code although the intent is that it will eventually only be cloud +controller manager. + +**Note:** go-get or vendor this package as `k8s.io/cloud-provider`. + +## Purpose + +This library is a shared dependency for processes which need to be able to +integrate with cloud-provider specific functionality. + +## Compatibility + +Cloud Providers are expected to keep the HEAD of their implementations in sync +with the HEAD of this repository. + +## Where does it come from? + +`cloud-provider` is synced from +https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/cloud-provider. +Code changes are made in that location, merged into k8s.io/kubernetes and +later synced here. + +## Things you should NOT do + + 1. Add any cloud provider specific code to this repo. + 2. Directly modify anything under vendor/k8s.io/cloud-provider in this repo. Those are driven from `k8s.io/kubernetes/staging/src/k8s.io/cloud-provider`. + 3. Make interface changes without first discussing them with + sig-cloudprovider. diff --git a/vendor/k8s.io/cloud-provider/SECURITY_CONTACTS b/vendor/k8s.io/cloud-provider/SECURITY_CONTACTS new file mode 100644 index 0000000000000..c8282d6bd6950 --- /dev/null +++ b/vendor/k8s.io/cloud-provider/SECURITY_CONTACTS @@ -0,0 +1,15 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. 
+# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cheftako +andrewsykim +dims diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go b/vendor/k8s.io/cloud-provider/cloud.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go rename to vendor/k8s.io/cloud-provider/cloud.go index f0fd8864c23d9..6db0219520bfd 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go +++ b/vendor/k8s.io/cloud-provider/cloud.go @@ -25,14 +25,26 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" - "k8s.io/kubernetes/pkg/controller" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" ) +// ControllerClientBuilder allows you to get clients and configs for controllers +// Please note a copy also exists in pkg/controller/client_builder.go +// TODO: Make this depend on the separate controller utilities repo (issues/68947) +type ControllerClientBuilder interface { + Config(name string) (*restclient.Config, error) + ConfigOrDie(name string) *restclient.Config + Client(name string) (clientset.Interface, error) + ClientOrDie(name string) clientset.Interface +} + // Interface is an abstract, pluggable interface for cloud providers. type Interface interface { // Initialize provides the cloud with a kubernetes client builder and may spawn goroutines - // to perform housekeeping activities within the cloud provider. - Initialize(clientBuilder controller.ControllerClientBuilder) + // to perform housekeeping or run custom controllers specific to the cloud provider. + // Any tasks started here should be cleaned up when the stop channel closes. + Initialize(clientBuilder ControllerClientBuilder, stop <-chan struct{}) // LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise. LoadBalancer() (LoadBalancer, bool) // Instances returns an instances interface. Also returns true if the interface is supported, false otherwise. diff --git a/vendor/k8s.io/cloud-provider/code-of-conduct.md b/vendor/k8s.io/cloud-provider/code-of-conduct.md new file mode 100644 index 0000000000000..0d15c00cf3252 --- /dev/null +++ b/vendor/k8s.io/cloud-provider/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/doc.go b/vendor/k8s.io/cloud-provider/doc.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/cloudprovider/doc.go rename to vendor/k8s.io/cloud-provider/doc.go index 41f8ae8607b09..6b401e4564e9c 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/doc.go +++ b/vendor/k8s.io/cloud-provider/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package cloudprovider supplies interfaces and implementations for cloud service providers. 
-package cloudprovider // import "k8s.io/kubernetes/pkg/cloudprovider" +package cloudprovider // import "k8s.io/cloud-provider" diff --git a/vendor/k8s.io/cloud-provider/go.mod b/vendor/k8s.io/cloud-provider/go.mod new file mode 100644 index 0000000000000..f70095dc26421 --- /dev/null +++ b/vendor/k8s.io/cloud-provider/go.mod @@ -0,0 +1,24 @@ +// This is a generated file. Do not edit directly. + +module k8s.io/cloud-provider + +go 1.12 + +require ( + k8s.io/api v0.0.0-20190425012535-181e1f9c52c1 + k8s.io/apimachinery v0.0.0-20190425132440-17f84483f500 + k8s.io/apiserver v0.0.0-20190425173233-f2b86a81176b + k8s.io/client-go v0.0.0-20190425172711-65184652c889 + k8s.io/klog v0.3.0 + k8s.io/utils v0.0.0-20190221042446-c2654d5206da +) + +replace ( + golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f + golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 + golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 + k8s.io/api => k8s.io/api v0.0.0-20190425012535-181e1f9c52c1 + k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190425132440-17f84483f500 + k8s.io/apiserver => k8s.io/apiserver v0.0.0-20190425173233-f2b86a81176b + k8s.io/client-go => k8s.io/client-go v0.0.0-20190425172711-65184652c889 +) diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum new file mode 100644 index 0000000000000..ee854c43ecaaa --- /dev/null +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -0,0 +1,155 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 
+github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20170330212424-2500245aa611/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/jonboulle/clockwork v0.0.0-20141017032234-72f9bd7c4e0c/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= +github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod 
h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v0.0.0-20171019201919-bdcc60b419d1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= +golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +k8s.io/api v0.0.0-20190425012535-181e1f9c52c1/go.mod h1:AhUc3Ph6fhRc0SCpt0Hwv0E+Q8QiEAASkXKwfmT2JwU= +k8s.io/apimachinery v0.0.0-20190425132440-17f84483f500/go.mod h1:5CBnzrKYGHzv9ZsSKmQ8wHt4XI4/TUBPDwYM9FlZMyw= +k8s.io/apiserver v0.0.0-20190425173233-f2b86a81176b/go.mod h1:omlj40TPI/OV4YFwPP09JuOkEkKbpS5bNE2T2sPeY80= +k8s.io/client-go v0.0.0-20190425172711-65184652c889/go.mod h1:PeVFCnjeDy6EwLN+wdDIZd1DwDY6jnkpQt9psMo5YRU= +k8s.io/component-base v0.0.0-20190424053038-9fe063da3132/go.mod h1:pi2NQz+AaW5UMjaswai1Hfzqzhh7bV6ssi3X3k4s03g= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= +k8s.io/kube-openapi 
v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= +k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= +sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go b/vendor/k8s.io/cloud-provider/plugins.go similarity index 84% rename from vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go rename to vendor/k8s.io/cloud-provider/plugins.go index e9aa90c7bcf9a..9fc6aff8cd255 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go +++ b/vendor/k8s.io/cloud-provider/plugins.go @@ -22,7 +22,7 @@ import ( "os" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // Factory is a function that returns a cloudprovider.Interface. @@ -40,10 +40,14 @@ var ( external bool detail string }{ - {"openstack", true, "https://github.com/kubernetes/cloud-provider-openstack"}, - {"photon", false, "The Photon Controller project is no longer maintained."}, + {"aws", false, "The AWS provider is deprecated and will be removed in a future release"}, + {"azure", false, "The Azure provider is deprecated and will be removed in a future release"}, {"cloudstack", false, "The CloudStack Controller project is no longer maintained."}, + {"gce", false, "The GCE provider is deprecated and will be removed in a future release"}, + {"openstack", true, "https://github.com/kubernetes/cloud-provider-openstack"}, {"ovirt", false, "The ovirt Controller project is no longer maintained."}, + {"photon", false, "The Photon Controller project is no longer maintained."}, + {"vsphere", false, "The vSphere provider is deprecated and will be removed in a future release"}, } ) @@ -55,9 +59,9 @@ func RegisterCloudProvider(name string, cloud Factory) { providersMutex.Lock() defer providersMutex.Unlock() if _, found := providers[name]; found { - glog.Fatalf("Cloud provider %q was registered twice", name) + klog.Fatalf("Cloud provider %q was registered twice", name) } - glog.V(1).Infof("Registered cloud provider %q", name) + klog.V(1).Infof("Registered cloud provider %q", name) providers[name] = cloud } @@ -96,12 +100,12 @@ func InitCloudProvider(name string, configFilePath string) (Interface, error) { var err error if name == "" { - glog.Info("No cloud provider specified.") + klog.Info("No cloud provider specified.") return nil, nil } if IsExternal(name) { - glog.Info("External cloud provider specified") + klog.Info("External cloud provider specified") return nil, nil } @@ -111,7 +115,7 @@ func InitCloudProvider(name string, configFilePath string) (Interface, error) { if provider.external { detail = fmt.Sprintf("Please use 'external' cloud provider for %s: %s", name, provider.detail) } - glog.Warningf("WARNING: %s built-in cloud provider is now deprecated. %s", name, detail) + klog.Warningf("WARNING: %s built-in cloud provider is now deprecated. 
%s", name, detail) break } @@ -121,7 +125,7 @@ func InitCloudProvider(name string, configFilePath string) (Interface, error) { var config *os.File config, err = os.Open(configFilePath) if err != nil { - glog.Fatalf("Couldn't open cloud provider configuration %s: %#v", + klog.Fatalf("Couldn't open cloud provider configuration %s: %#v", configFilePath, err) } diff --git a/vendor/k8s.io/code-generator/Godeps/Godeps.json b/vendor/k8s.io/code-generator/Godeps/Godeps.json index affc13ced2b64..4546feed41b3c 100644 --- a/vendor/k8s.io/code-generator/Godeps/Godeps.json +++ b/vendor/k8s.io/code-generator/Godeps/Godeps.json @@ -1,211 +1,115 @@ { "ImportPath": "k8s.io/code-generator", - "GoVersion": "go1.10", + "GoVersion": "go1.11", "GodepVersion": "v80", "Packages": [ "./..." ], "Deps": [ - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "1de3e0542de65ad8d75452a595886fdd0befb363" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/gogoproto", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/compare", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/description", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/embedcheck", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/enumstringer", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/equal", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/face", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/gostring", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/marshalto", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": 
"github.com/gogo/protobuf/plugin/oneofcheck", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/populate", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/size", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/stringer", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/testgen", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/union", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/plugin/unmarshal", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/proto", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/sortkeys", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/vanity", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/vanity/command", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" - }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/spf13/pflag", "Rev": "583c0c0531f06d5278b7d917446061adc344b5cd" }, - { - "ImportPath": "golang.org/x/net/idna", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" - }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": 
"golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/secure/bidirule", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/transform", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/unicode/bidi", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/unicode/norm", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/tools/go/ast/astutil", "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" @@ -214,69 +118,49 @@ "ImportPath": "golang.org/x/tools/imports", "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "670d4cfef0544295bc27a114dbac37980d83185a" - }, { "ImportPath": "k8s.io/gengo/args", - "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" }, { "ImportPath": "k8s.io/gengo/examples/deepcopy-gen/generators", - "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" }, { "ImportPath": "k8s.io/gengo/examples/defaulter-gen/generators", - "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" }, { "ImportPath": "k8s.io/gengo/examples/import-boss/generators", - "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" }, { "ImportPath": "k8s.io/gengo/examples/set-gen/generators", - "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" }, { "ImportPath": "k8s.io/gengo/examples/set-gen/sets", - "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" }, { "ImportPath": "k8s.io/gengo/generator", - "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" }, { "ImportPath": "k8s.io/gengo/namer", - "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" }, { "ImportPath": "k8s.io/gengo/parser", - "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" }, { "ImportPath": "k8s.io/gengo/types", - "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" - }, - { - "ImportPath": "k8s.io/kube-openapi/cmd/openapi-gen/args", - "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" - }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" - }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/generators", - "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" - }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/generators/rules", - "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" + "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" }, { - "ImportPath": "k8s.io/kube-openapi/pkg/util/sets", - "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" + "ImportPath": "k8s.io/klog", + "Rev": "8139d8cb77af419532b33dfa7dd09fbc5f1d344f" } ] } diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/BUILD.bazel similarity index 70% rename from vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD.bazel rename to vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/BUILD.bazel index d94aabfcc38b2..0f0014b0b54ab 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD.bazel +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/BUILD.bazel @@ -7,9 +7,10 @@ go_library( "register.go", "types.go", "zz_generated.deepcopy.go", + "zz_generated.defaults.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration", - importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration", + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1", + importpath = "k8s.io/code-generator/_examples/MixedCase/apis/example/v1", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/doc.go similarity index 81% rename from vendor/k8s.io/kubernetes/pkg/util/labels/doc.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/doc.go index a5e83763e62ed..e6614c0da665a 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/doc.go @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package labels provides utilities to work with Kubernetes labels. -package labels // import "k8s.io/kubernetes/pkg/util/labels" +// +k8s:deepcopy-gen=package +// +k8s:defaulter-gen=TypeMeta +// +groupName=example.crd.code-generator.k8s.io +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/register.go b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/register.go similarity index 57% rename from vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/register.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/register.go index ff505a577e14d..58371e0e9431e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/register.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,25 +14,21 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta1 +package v1 import ( - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -const GroupName = "admissionregistration.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} +var SchemeGroupVersion = schema.GroupVersion{Group: "example.crd.code-generator.k8s.io", Version: "v1"} var ( - localSchemeBuilder = &admissionregistrationv1beta1.SchemeBuilder + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder AddToScheme = localSchemeBuilder.AddToScheme ) @@ -40,5 +36,24 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addDefaultingFuncs) + localSchemeBuilder.Register(addKnownTypes) +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TestType{}, + &TestTypeList{}, + ) + + scheme.AddKnownTypes(SchemeGroupVersion, + &metav1.Status{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil } diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/types.go b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/types.go new file mode 100644 index 0000000000000..d79ea38b72075 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/types.go @@ -0,0 +1,74 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TestType is a top-level type. A client is created for it. +type TestType struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // +optional + Status TestTypeStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TestTypeList is a top-level list type. The client methods for lists are automatically created. 
+// You are not supposed to create a separated client for this one. +type TestTypeList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []TestType `json:"items"` +} + +type TestTypeStatus struct { + Blah string +} + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ClusterTestTypeList struct { + metav1.TypeMeta + metav1.ListMeta + Items []ClusterTestType +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale + +type ClusterTestType struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // +optional + Status ClusterTestTypeStatus `json:"status,omitempty"` +} + +type ClusterTestTypeStatus struct { + Blah string +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.deepcopy.go b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..a3b4bfa9c210b --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.deepcopy.go @@ -0,0 +1,177 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTestType) DeepCopyInto(out *ClusterTestType) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTestType. +func (in *ClusterTestType) DeepCopy() *ClusterTestType { + if in == nil { + return nil + } + out := new(ClusterTestType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTestType) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterTestTypeList) DeepCopyInto(out *ClusterTestTypeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterTestType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTestTypeList. +func (in *ClusterTestTypeList) DeepCopy() *ClusterTestTypeList { + if in == nil { + return nil + } + out := new(ClusterTestTypeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTestTypeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTestTypeStatus) DeepCopyInto(out *ClusterTestTypeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTestTypeStatus. +func (in *ClusterTestTypeStatus) DeepCopy() *ClusterTestTypeStatus { + if in == nil { + return nil + } + out := new(ClusterTestTypeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestType) DeepCopyInto(out *TestType) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestType. +func (in *TestType) DeepCopy() *TestType { + if in == nil { + return nil + } + out := new(TestType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TestType) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestTypeList) DeepCopyInto(out *TestTypeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TestType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTypeList. +func (in *TestTypeList) DeepCopy() *TestTypeList { + if in == nil { + return nil + } + out := new(TestTypeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TestTypeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestTypeStatus) DeepCopyInto(out *TestTypeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTypeStatus. 
+func (in *TestTypeStatus) DeepCopy() *TestTypeStatus { + if in == nil { + return nil + } + out := new(TestTypeStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.defaults.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.defaults.go index dd621a3acda82..cce2e603a69ad 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.defaults.go @@ -18,7 +18,7 @@ limitations under the License. // Code generated by defaulter-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/BUILD.bazel new file mode 100644 index 0000000000000..061f72f0bbe3c --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "clientset.go", + "doc.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned", + importpath = "k8s.io/code-generator/_examples/MixedCase/clientset/versioned", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1:go_default_library", + ], +) diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/clientset.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/clientset.go new file mode 100644 index 0000000000000..72931e51ed5c8 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/clientset.go @@ -0,0 +1,98 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + examplev1 "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ExampleV1() examplev1.ExampleV1Interface + // Deprecated: please explicitly pick a version if possible. + Example() examplev1.ExampleV1Interface +} + +// Clientset contains the clients for groups. 
Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + exampleV1 *examplev1.ExampleV1Client +} + +// ExampleV1 retrieves the ExampleV1Client +func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { + return c.exampleV1 +} + +// Deprecated: Example retrieves the default version of ExampleClient. +// Please explicitly pick a version. +func (c *Clientset) Example() examplev1.ExampleV1Interface { + return c.exampleV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.exampleV1, err = examplev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.exampleV1 = examplev1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.exampleV1 = examplev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/doc.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/doc.go index 01b3d5e0f63c8..41721ca52d44e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated clientset. 
-package internalclientset +package versioned diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/BUILD.bazel new file mode 100644 index 0000000000000..dae1207ad6067 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/BUILD.bazel @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "clientset_generated.go", + "doc.go", + "register.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake", + importpath = "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/discovery/fake:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake:go_default_library", + ], +) diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/clientset_generated.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000000..9a7307606c636 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,82 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" + clientset "k8s.io/code-generator/_examples/MixedCase/clientset/versioned" + examplev1 "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1" + fakeexamplev1 "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. 
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +var _ clientset.Interface = &Clientset{} + +// ExampleV1 retrieves the ExampleV1Client +func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { + return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} +} + +// Example retrieves the ExampleV1Client +func (c *Clientset) Example() examplev1.ExampleV1Interface { + return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/generated_expansion.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/doc.go similarity index 88% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/generated_expansion.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/doc.go index f00428522bde3..9b99e7167091a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/generated_expansion.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/doc.go @@ -16,6 +16,5 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package internalversion - -type HorizontalPodAutoscalerExpansion interface{} +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/register.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/register.go new file mode 100644 index 0000000000000..7fe8bc6a5f5b4 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + examplev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/BUILD.bazel new file mode 100644 index 0000000000000..91e621eca6a24 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme", + importpath = "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/doc.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/doc.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/doc.go diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/register.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000000..2cf84f85a9717 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + examplev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/BUILD.bazel similarity index 50% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD.bazel rename to vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/BUILD.bazel index cbe203704236e..2f5463896d1c4 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD.bazel +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/BUILD.bazel @@ -3,22 +3,23 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "apps_client.go", - "controllerrevision.go", + "clustertesttype.go", "doc.go", + "example_client.go", "generated_expansion.go", - "statefulset.go", + "testtype.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1", + importpath = "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go new file mode 100644 index 0000000000000..559ce47eb775a --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go @@ -0,0 +1,210 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" + scheme "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" +) + +// ClusterTestTypesGetter has a method to return a ClusterTestTypeInterface. +// A group's client should implement this interface. +type ClusterTestTypesGetter interface { + ClusterTestTypes() ClusterTestTypeInterface +} + +// ClusterTestTypeInterface has methods to work with ClusterTestType resources. 
+type ClusterTestTypeInterface interface { + Create(*v1.ClusterTestType) (*v1.ClusterTestType, error) + Update(*v1.ClusterTestType) (*v1.ClusterTestType, error) + UpdateStatus(*v1.ClusterTestType) (*v1.ClusterTestType, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ClusterTestType, error) + List(opts metav1.ListOptions) (*v1.ClusterTestTypeList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterTestType, err error) + GetScale(clusterTestTypeName string, options metav1.GetOptions) (*autoscaling.Scale, error) + UpdateScale(clusterTestTypeName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) + + ClusterTestTypeExpansion +} + +// clusterTestTypes implements ClusterTestTypeInterface +type clusterTestTypes struct { + client rest.Interface +} + +// newClusterTestTypes returns a ClusterTestTypes +func newClusterTestTypes(c *ExampleV1Client) *clusterTestTypes { + return &clusterTestTypes{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterTestType, and returns the corresponding clusterTestType object, and an error if there is any. +func (c *clusterTestTypes) Get(name string, options metav1.GetOptions) (result *v1.ClusterTestType, err error) { + result = &v1.ClusterTestType{} + err = c.client.Get(). + Resource("clustertesttypes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterTestTypes that match those selectors. +func (c *clusterTestTypes) List(opts metav1.ListOptions) (result *v1.ClusterTestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ClusterTestTypeList{} + err = c.client.Get(). + Resource("clustertesttypes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterTestTypes. +func (c *clusterTestTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustertesttypes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterTestType and creates it. Returns the server's representation of the clusterTestType, and an error, if there is any. +func (c *clusterTestTypes) Create(clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { + result = &v1.ClusterTestType{} + err = c.client.Post(). + Resource("clustertesttypes"). + Body(clusterTestType). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterTestType and updates it. Returns the server's representation of the clusterTestType, and an error, if there is any. +func (c *clusterTestTypes) Update(clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { + result = &v1.ClusterTestType{} + err = c.client.Put(). + Resource("clustertesttypes"). + Name(clusterTestType.Name). + Body(clusterTestType). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *clusterTestTypes) UpdateStatus(clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { + result = &v1.ClusterTestType{} + err = c.client.Put(). + Resource("clustertesttypes"). + Name(clusterTestType.Name). + SubResource("status"). + Body(clusterTestType). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterTestType and deletes it. Returns an error if one occurs. +func (c *clusterTestTypes) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustertesttypes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterTestTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustertesttypes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterTestType. +func (c *clusterTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterTestType, err error) { + result = &v1.ClusterTestType{} + err = c.client.Patch(pt). + Resource("clustertesttypes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} + +// GetScale takes name of the clusterTestType, and returns the corresponding autoscaling.Scale object, and an error if there is any. +func (c *clusterTestTypes) GetScale(clusterTestTypeName string, options metav1.GetOptions) (result *autoscaling.Scale, err error) { + result = &autoscaling.Scale{} + err = c.client.Get(). + Resource("clustertesttypes"). + Name(clusterTestTypeName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *clusterTestTypes) UpdateScale(clusterTestTypeName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { + result = &autoscaling.Scale{} + err = c.client.Put(). + Resource("clustertesttypes"). + Name(clusterTestTypeName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/doc.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/doc.go index 86602442babdc..3af5d054f1026 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. 
DO NOT EDIT. // This package has the automatically generated typed clients. -package internalversion +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/batch_client.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/example_client.go similarity index 51% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/batch_client.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/example_client.go index ebfcb85a2649c..9fdbf4aad188b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/batch_client.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/example_client.go @@ -16,34 +16,36 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package internalversion +package v1 import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" + v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" + "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme" ) -type BatchInterface interface { +type ExampleV1Interface interface { RESTClient() rest.Interface - CronJobsGetter - JobsGetter + ClusterTestTypesGetter + TestTypesGetter } -// BatchClient is used to interact with features provided by the batch group. -type BatchClient struct { +// ExampleV1Client is used to interact with features provided by the example.crd.code-generator.k8s.io group. +type ExampleV1Client struct { restClient rest.Interface } -func (c *BatchClient) CronJobs(namespace string) CronJobInterface { - return newCronJobs(c, namespace) +func (c *ExampleV1Client) ClusterTestTypes() ClusterTestTypeInterface { + return newClusterTestTypes(c) } -func (c *BatchClient) Jobs(namespace string) JobInterface { - return newJobs(c, namespace) +func (c *ExampleV1Client) TestTypes(namespace string) TestTypeInterface { + return newTestTypes(c, namespace) } -// NewForConfig creates a new BatchClient for the given config. -func NewForConfig(c *rest.Config) (*BatchClient, error) { +// NewForConfig creates a new ExampleV1Client for the given config. +func NewForConfig(c *rest.Config) (*ExampleV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -52,12 +54,12 @@ func NewForConfig(c *rest.Config) (*BatchClient, error) { if err != nil { return nil, err } - return &BatchClient{client}, nil + return &ExampleV1Client{client}, nil } -// NewForConfigOrDie creates a new BatchClient for the given config and +// NewForConfigOrDie creates a new ExampleV1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *BatchClient { +func NewForConfigOrDie(c *rest.Config) *ExampleV1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -65,35 +67,27 @@ func NewForConfigOrDie(c *rest.Config) *BatchClient { return client } -// New creates a new BatchClient for the given RESTClient. -func New(c rest.Interface) *BatchClient { - return &BatchClient{c} +// New creates a new ExampleV1Client for the given RESTClient. 
+func New(c rest.Interface) *ExampleV1Client { + return &ExampleV1Client{c} } func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("batch")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("batch")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } return nil } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *BatchClient) RESTClient() rest.Interface { +func (c *ExampleV1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/BUILD.bazel new file mode 100644 index 0000000000000..2b976c5836ea1 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "fake_clustertesttype.go", + "fake_example_client.go", + "fake_testtype.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake", + importpath = "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/generated_expansion.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/doc.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/generated_expansion.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/doc.go index 1b59c8431cea3..16f44399065ed 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/generated_expansion.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/doc.go @@ -16,4 +16,5 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package internalversion +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go new file mode 100644 index 0000000000000..edf217df0da29 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go @@ -0,0 +1,152 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" +) + +// FakeClusterTestTypes implements ClusterTestTypeInterface +type FakeClusterTestTypes struct { + Fake *FakeExampleV1 +} + +var clustertesttypesResource = schema.GroupVersionResource{Group: "example.crd.code-generator.k8s.io", Version: "v1", Resource: "clustertesttypes"} + +var clustertesttypesKind = schema.GroupVersionKind{Group: "example.crd.code-generator.k8s.io", Version: "v1", Kind: "ClusterTestType"} + +// Get takes name of the clusterTestType, and returns the corresponding clusterTestType object, and an error if there is any. +func (c *FakeClusterTestTypes) Get(name string, options v1.GetOptions) (result *examplev1.ClusterTestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clustertesttypesResource, name), &examplev1.ClusterTestType{}) + if obj == nil { + return nil, err + } + return obj.(*examplev1.ClusterTestType), err +} + +// List takes label and field selectors, and returns the list of ClusterTestTypes that match those selectors. +func (c *FakeClusterTestTypes) List(opts v1.ListOptions) (result *examplev1.ClusterTestTypeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clustertesttypesResource, clustertesttypesKind, opts), &examplev1.ClusterTestTypeList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &examplev1.ClusterTestTypeList{ListMeta: obj.(*examplev1.ClusterTestTypeList).ListMeta} + for _, item := range obj.(*examplev1.ClusterTestTypeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterTestTypes. +func (c *FakeClusterTestTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchAction(clustertesttypesResource, opts)) +} + +// Create takes the representation of a clusterTestType and creates it. Returns the server's representation of the clusterTestType, and an error, if there is any. +func (c *FakeClusterTestTypes) Create(clusterTestType *examplev1.ClusterTestType) (result *examplev1.ClusterTestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clustertesttypesResource, clusterTestType), &examplev1.ClusterTestType{}) + if obj == nil { + return nil, err + } + return obj.(*examplev1.ClusterTestType), err +} + +// Update takes the representation of a clusterTestType and updates it. Returns the server's representation of the clusterTestType, and an error, if there is any. +func (c *FakeClusterTestTypes) Update(clusterTestType *examplev1.ClusterTestType) (result *examplev1.ClusterTestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clustertesttypesResource, clusterTestType), &examplev1.ClusterTestType{}) + if obj == nil { + return nil, err + } + return obj.(*examplev1.ClusterTestType), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterTestTypes) UpdateStatus(clusterTestType *examplev1.ClusterTestType) (*examplev1.ClusterTestType, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(clustertesttypesResource, "status", clusterTestType), &examplev1.ClusterTestType{}) + if obj == nil { + return nil, err + } + return obj.(*examplev1.ClusterTestType), err +} + +// Delete takes name of the clusterTestType and deletes it. Returns an error if one occurs. +func (c *FakeClusterTestTypes) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clustertesttypesResource, name), &examplev1.ClusterTestType{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clustertesttypesResource, listOptions) + + _, err := c.Fake.Invokes(action, &examplev1.ClusterTestTypeList{}) + return err +} + +// Patch applies the patch and returns the patched clusterTestType. +func (c *FakeClusterTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.ClusterTestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clustertesttypesResource, name, pt, data, subresources...), &examplev1.ClusterTestType{}) + if obj == nil { + return nil, err + } + return obj.(*examplev1.ClusterTestType), err +} + +// GetScale takes name of the clusterTestType, and returns the corresponding scale object, and an error if there is any. +func (c *FakeClusterTestTypes) GetScale(clusterTestTypeName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetSubresourceAction(clustertesttypesResource, "scale", clusterTestTypeName), &autoscaling.Scale{}) + if obj == nil { + return nil, err + } + return obj.(*autoscaling.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
+func (c *FakeClusterTestTypes) UpdateScale(clusterTestTypeName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(clustertesttypesResource, "scale", scale), &autoscaling.Scale{}) + if obj == nil { + return nil, err + } + return obj.(*autoscaling.Scale), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_example_client.go similarity index 52% rename from vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_example_client.go index f8d6a7fb0f800..265930a0e4863 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_example_client.go @@ -16,33 +16,29 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1beta2 +package fake import ( rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1 "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1" ) -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface +type FakeExampleV1 struct { + *testing.Fake } -// ScaleInterface has methods to work with Scale resources. -type ScaleInterface interface { - ScaleExpansion +func (c *FakeExampleV1) ClusterTestTypes() v1.ClusterTestTypeInterface { + return &FakeClusterTestTypes{c} } -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string +func (c *FakeExampleV1) TestTypes(namespace string) v1.TestTypeInterface { + return &FakeTestTypes{c, namespace} } -// newScales returns a Scales -func newScales(c *AppsV1beta2Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeExampleV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret } diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go new file mode 100644 index 0000000000000..2ff811a96920a --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" +) + +// FakeTestTypes implements TestTypeInterface +type FakeTestTypes struct { + Fake *FakeExampleV1 + ns string +} + +var testtypesResource = schema.GroupVersionResource{Group: "example.crd.code-generator.k8s.io", Version: "v1", Resource: "testtypes"} + +var testtypesKind = schema.GroupVersionKind{Group: "example.crd.code-generator.k8s.io", Version: "v1", Kind: "TestType"} + +// Get takes name of the testType, and returns the corresponding testType object, and an error if there is any. +func (c *FakeTestTypes) Get(name string, options v1.GetOptions) (result *examplev1.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(testtypesResource, c.ns, name), &examplev1.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*examplev1.TestType), err +} + +// List takes label and field selectors, and returns the list of TestTypes that match those selectors. +func (c *FakeTestTypes) List(opts v1.ListOptions) (result *examplev1.TestTypeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(testtypesResource, testtypesKind, c.ns, opts), &examplev1.TestTypeList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &examplev1.TestTypeList{ListMeta: obj.(*examplev1.TestTypeList).ListMeta} + for _, item := range obj.(*examplev1.TestTypeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested testTypes. +func (c *FakeTestTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(testtypesResource, c.ns, opts)) + +} + +// Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *FakeTestTypes) Create(testType *examplev1.TestType) (result *examplev1.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*examplev1.TestType), err +} + +// Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *FakeTestTypes) Update(testType *examplev1.TestType) (result *examplev1.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*examplev1.TestType), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeTestTypes) UpdateStatus(testType *examplev1.TestType) (*examplev1.TestType, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &examplev1.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*examplev1.TestType), err +} + +// Delete takes name of the testType and deletes it. Returns an error if one occurs. +func (c *FakeTestTypes) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(testtypesResource, c.ns, name), &examplev1.TestType{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(testtypesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &examplev1.TestTypeList{}) + return err +} + +// Patch applies the patch and returns the patched testType. +func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &examplev1.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*examplev1.TestType), err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/generated_expansion.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/generated_expansion.go similarity index 87% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/generated_expansion.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/generated_expansion.go index e8f42d1c5b64b..3059734a9ea65 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/generated_expansion.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/generated_expansion.go @@ -16,8 +16,8 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package internalversion +package v1 -type CronJobExpansion interface{} +type ClusterTestTypeExpansion interface{} -type JobExpansion interface{} +type TestTypeExpansion interface{} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go new file mode 100644 index 0000000000000..9944e3e7c7249 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" + scheme "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme" +) + +// TestTypesGetter has a method to return a TestTypeInterface. +// A group's client should implement this interface. +type TestTypesGetter interface { + TestTypes(namespace string) TestTypeInterface +} + +// TestTypeInterface has methods to work with TestType resources. +type TestTypeInterface interface { + Create(*v1.TestType) (*v1.TestType, error) + Update(*v1.TestType) (*v1.TestType, error) + UpdateStatus(*v1.TestType) (*v1.TestType, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.TestType, error) + List(opts metav1.ListOptions) (*v1.TestTypeList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) + TestTypeExpansion +} + +// testTypes implements TestTypeInterface +type testTypes struct { + client rest.Interface + ns string +} + +// newTestTypes returns a TestTypes +func newTestTypes(c *ExampleV1Client, namespace string) *testTypes { + return &testTypes{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the testType, and returns the corresponding testType object, and an error if there is any. +func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.TestType, err error) { + result = &v1.TestType{} + err = c.client.Get(). + Namespace(c.ns). + Resource("testtypes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of TestTypes that match those selectors. +func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.TestTypeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested testTypes. +func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *testTypes) Create(testType *v1.TestType) (result *v1.TestType, err error) { + result = &v1.TestType{} + err = c.client.Post(). + Namespace(c.ns). + Resource("testtypes"). + Body(testType). + Do(). + Into(result) + return +} + +// Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. 
+func (c *testTypes) Update(testType *v1.TestType) (result *v1.TestType, err error) { + result = &v1.TestType{} + err = c.client.Put(). + Namespace(c.ns). + Resource("testtypes"). + Name(testType.Name). + Body(testType). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *testTypes) UpdateStatus(testType *v1.TestType) (result *v1.TestType, err error) { + result = &v1.TestType{} + err = c.client.Put(). + Namespace(c.ns). + Resource("testtypes"). + Name(testType.Name). + SubResource("status"). + Body(testType). + Do(). + Into(result) + return +} + +// Delete takes name of the testType and deletes it. Returns an error if one occurs. +func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("testtypes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("testtypes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched testType. +func (c *testTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) { + result = &v1.TestType{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("testtypes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/BUILD.bazel new file mode 100644 index 0000000000000..3adf278b264f9 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "factory.go", + "generic.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions", + importpath = "k8s.io/code-generator/_examples/MixedCase/informers/externalversions", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces:go_default_library", + ], +) diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/BUILD.bazel new file mode 100644 index 0000000000000..807c40ca5cf19 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["interface.go"], + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example", + importpath = "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces:go_default_library", + ], +) diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/interface.go b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/interface.go new file mode 100644 index 0000000000000..a874fdc5d8955 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package example + +import ( + v1 "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1" + internalinterfaces "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/BUILD.bazel new file mode 100644 index 0000000000000..58b78b7812b12 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "clustertesttype.go", + "interface.go", + "testtype.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1", + importpath = "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1:go_default_library", + ], +) diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go new file mode 100644 index 0000000000000..e0607c11a0507 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + time "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" + versioned "k8s.io/code-generator/_examples/MixedCase/clientset/versioned" + internalinterfaces "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces" + v1 "k8s.io/code-generator/_examples/MixedCase/listers/example/v1" +) + +// ClusterTestTypeInformer provides access to a shared informer and lister for +// ClusterTestTypes. +type ClusterTestTypeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterTestTypeLister +} + +type clusterTestTypeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterTestTypeInformer constructs a new informer for ClusterTestType type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterTestTypeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterTestTypeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterTestTypeInformer constructs a new informer for ClusterTestType type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterTestTypeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExampleV1().ClusterTestTypes().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExampleV1().ClusterTestTypes().Watch(options) + }, + }, + &examplev1.ClusterTestType{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterTestTypeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterTestTypeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterTestTypeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&examplev1.ClusterTestType{}, f.defaultInformer) +} + +func (f *clusterTestTypeInformer) Lister() v1.ClusterTestTypeLister { + return v1.NewClusterTestTypeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/interface.go b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/interface.go new file mode 100644 index 0000000000000..5389d07fbb372 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterTestTypes returns a ClusterTestTypeInformer. + ClusterTestTypes() ClusterTestTypeInformer + // TestTypes returns a TestTypeInformer. + TestTypes() TestTypeInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterTestTypes returns a ClusterTestTypeInformer. +func (v *version) ClusterTestTypes() ClusterTestTypeInformer { + return &clusterTestTypeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// TestTypes returns a TestTypeInformer. +func (v *version) TestTypes() TestTypeInformer { + return &testTypeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go new file mode 100644 index 0000000000000..18f3b88d0d3df --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" + versioned "k8s.io/code-generator/_examples/MixedCase/clientset/versioned" + internalinterfaces "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces" + v1 "k8s.io/code-generator/_examples/MixedCase/listers/example/v1" +) + +// TestTypeInformer provides access to a shared informer and lister for +// TestTypes. 
+type TestTypeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.TestTypeLister +} + +type testTypeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewTestTypeInformer constructs a new informer for TestType type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewTestTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredTestTypeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredTestTypeInformer constructs a new informer for TestType type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredTestTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExampleV1().TestTypes(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExampleV1().TestTypes(namespace).Watch(options) + }, + }, + &examplev1.TestType{}, + resyncPeriod, + indexers, + ) +} + +func (f *testTypeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredTestTypeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *testTypeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&examplev1.TestType{}, f.defaultInformer) +} + +func (f *testTypeInformer) Lister() v1.TestTypeLister { + return v1.NewTestTypeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/factory.go b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/factory.go new file mode 100644 index 0000000000000..5a2d8f74875a6 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" + versioned "k8s.io/code-generator/_examples/MixedCase/clientset/versioned" + example "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example" + internalinterfaces "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Example() example.Interface +} + +func (f *sharedInformerFactory) Example() example.Interface { + return example.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/generic.go b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/generic.go new file mode 100644 index 0000000000000..e039c8edfb568 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/generic.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" + v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=example.crd.code-generator.k8s.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("clustertesttypes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Example().V1().ClusterTestTypes().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("testtypes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Example().V1().TestTypes().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces/BUILD.bazel new file mode 100644 index 0000000000000..d9c9edd4f3892 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["factory_interfaces.go"], + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces", + importpath = "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned:go_default_library", + ], +) diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000000..2ed31b44dd6c6 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" + versioned "k8s.io/code-generator/_examples/MixedCase/clientset/versioned" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/BUILD.bazel b/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/BUILD.bazel new file mode 100644 index 0000000000000..be15371ecb2ac --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "clustertesttype.go", + "expansion_generated.go", + "testtype.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1", + importpath = "k8s.io/code-generator/_examples/MixedCase/listers/example/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1:go_default_library", + ], +) diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/clustertesttype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/clustertesttype.go new file mode 100644 index 0000000000000..8e93b73f91860 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/clustertesttype.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" +) + +// ClusterTestTypeLister helps list ClusterTestTypes. +type ClusterTestTypeLister interface { + // List lists all ClusterTestTypes in the indexer. + List(selector labels.Selector) (ret []*v1.ClusterTestType, err error) + // Get retrieves the ClusterTestType from the index for a given name. + Get(name string) (*v1.ClusterTestType, error) + ClusterTestTypeListerExpansion +} + +// clusterTestTypeLister implements the ClusterTestTypeLister interface. +type clusterTestTypeLister struct { + indexer cache.Indexer +} + +// NewClusterTestTypeLister returns a new ClusterTestTypeLister. +func NewClusterTestTypeLister(indexer cache.Indexer) ClusterTestTypeLister { + return &clusterTestTypeLister{indexer: indexer} +} + +// List lists all ClusterTestTypes in the indexer. +func (s *clusterTestTypeLister) List(selector labels.Selector) (ret []*v1.ClusterTestType, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterTestType)) + }) + return ret, err +} + +// Get retrieves the ClusterTestType from the index for a given name. +func (s *clusterTestTypeLister) Get(name string) (*v1.ClusterTestType, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clustertesttype"), name) + } + return obj.(*v1.ClusterTestType), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go b/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/expansion_generated.go similarity index 54% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go rename to vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/expansion_generated.go index 4cc8f66b772dc..2681a29f4726f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/expansion_generated.go @@ -14,12 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. +// Code generated by lister-gen. DO NOT EDIT. -package internalversion +package v1 -type InitializerConfigurationExpansion interface{} +// ClusterTestTypeListerExpansion allows custom methods to be added to +// ClusterTestTypeLister. +type ClusterTestTypeListerExpansion interface{} -type MutatingWebhookConfigurationExpansion interface{} +// TestTypeListerExpansion allows custom methods to be added to +// TestTypeLister. +type TestTypeListerExpansion interface{} -type ValidatingWebhookConfigurationExpansion interface{} +// TestTypeNamespaceListerExpansion allows custom methods to be added to +// TestTypeNamespaceLister. 
+type TestTypeNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/testtype.go new file mode 100644 index 0000000000000..292dcedd0b344 --- /dev/null +++ b/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/testtype.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" +) + +// TestTypeLister helps list TestTypes. +type TestTypeLister interface { + // List lists all TestTypes in the indexer. + List(selector labels.Selector) (ret []*v1.TestType, err error) + // TestTypes returns an object that can list and get TestTypes. + TestTypes(namespace string) TestTypeNamespaceLister + TestTypeListerExpansion +} + +// testTypeLister implements the TestTypeLister interface. +type testTypeLister struct { + indexer cache.Indexer +} + +// NewTestTypeLister returns a new TestTypeLister. +func NewTestTypeLister(indexer cache.Indexer) TestTypeLister { + return &testTypeLister{indexer: indexer} +} + +// List lists all TestTypes in the indexer. +func (s *testTypeLister) List(selector labels.Selector) (ret []*v1.TestType, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.TestType)) + }) + return ret, err +} + +// TestTypes returns an object that can list and get TestTypes. +func (s *testTypeLister) TestTypes(namespace string) TestTypeNamespaceLister { + return testTypeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// TestTypeNamespaceLister helps list and get TestTypes. +type TestTypeNamespaceLister interface { + // List lists all TestTypes in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.TestType, err error) + // Get retrieves the TestType from the indexer for a given namespace and name. + Get(name string) (*v1.TestType, error) + TestTypeNamespaceListerExpansion +} + +// testTypeNamespaceLister implements the TestTypeNamespaceLister +// interface. +type testTypeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all TestTypes in the indexer for a given namespace. +func (s testTypeNamespaceLister) List(selector labels.Selector) (ret []*v1.TestType, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.TestType)) + }) + return ret, err +} + +// Get retrieves the TestType from the indexer for a given namespace and name. 
+func (s testTypeNamespaceLister) Get(name string) (*v1.TestType, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("testtype"), name) + } + return obj.(*v1.TestType), nil +} diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go index b221d7eb49c84..3285a056fa94d 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=example.apiserver.code-generator.k8s.io + package example // import "k8s.io/code-generator/_examples/apiserver/apis/example" diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go index 5b6bd5b306f88..6b1fe6c11968c 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +k8s:conversion-gen=k8s.io/code-generator/_examples/apiserver/apis/example // +groupName=example.apiserver.code-generator.k8s.io + package v1 diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go index 3864803757fe6..0edb56dcddb9e 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=example.test.apiserver.code-generator.k8s.io // +groupGoName=SecondExample + package example2 // import "k8s.io/code-generator/_examples/apiserver/apis/example2" diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go index 36bd4549cd5cb..211aefc8c4ab7 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=example.test.apiserver.code-generator.k8s.io // +k8s:conversion-gen=k8s.io/code-generator/_examples/apiserver/apis/example2 // +groupGoName=SecondExample + package v1 diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go index 7b247ca950792..abff7bb0d67cf 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example.TestType, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &example.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go index d19392e85343f..a06b946169284 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *testTypes) Get(name string, options v1.GetOptions) (result *example.Tes // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts v1.ListOptions) (result *example.TestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &example.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *testTypes) List(opts v1.ListOptions) (result *example.TestTypeList, err // Watch returns a watch.Interface that watches the requested testTypes. func (c *testTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *testTypes) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go index 2890ff19d5a4a..0d63cf1033dfd 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. 
func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example2.TestType, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &example2.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example2.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go index 1a2ca7891a5a3..5380b86ce7a89 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *testTypes) Get(name string, options v1.GetOptions) (result *example2.Te // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts v1.ListOptions) (result *example2.TestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &example2.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *testTypes) List(opts v1.ListOptions) (result *example2.TestTypeList, er // Watch returns a watch.Interface that watches the requested testTypes. func (c *testTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *testTypes) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go index 6847f17d55cde..f7e2aacdee0b8 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.TestType, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &examplev1.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &examplev1.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go index 2a0606512f983..e25fd1fc211cc 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.Test // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err // Watch returns a watch.Interface that watches the requested testTypes. func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go index 51a3f3e7da474..ce0782e6e08ec 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example2v1.TestType, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &example2v1.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example2v1.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go index b29bd8118bb74..f5afee94ccf73 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.Test // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err // Watch returns a watch.Interface that watches the requested testTypes. func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go index 6318f5284168e..21c6669028e94 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -27,6 +27,7 @@ import ( versioned "k8s.io/code-generator/_examples/apiserver/clientset/versioned" ) +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -35,4 +36,5 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go index 03c679244d5ac..5d01820f9c6b4 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -27,6 +27,7 @@ import ( internalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion" ) +// NewInformerFunc takes internalversion.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(internalversion.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -35,4 +36,5 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go b/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go index e6614c0da665a..673ac55d7b442 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go +++ b/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:defaulter-gen=TypeMeta // +groupName=example.crd.code-generator.k8s.io + package v1 diff --git a/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go b/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go index 6521d83ff3918..5d1cbec5efbc7 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go +++ b/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +groupName=example.test.crd.code-generator.k8s.io // +groupGoName=SecondExample + package v1 diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go index 30e0e74980296..1217dd8673da3 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -77,10 +79,15 @@ func (c *clusterTestTypes) Get(name string, options metav1.GetOptions) (result * // List takes label and field selectors, and returns the list of ClusterTestTypes that match those selectors. func (c *clusterTestTypes) List(opts metav1.ListOptions) (result *v1.ClusterTestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ClusterTestTypeList{} err = c.client.Get(). Resource("clustertesttypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,10 +95,15 @@ func (c *clusterTestTypes) List(opts metav1.ListOptions) (result *v1.ClusterTest // Watch returns a watch.Interface that watches the requested clusterTestTypes. func (c *clusterTestTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clustertesttypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -145,9 +157,14 @@ func (c *clusterTestTypes) Delete(name string, options *metav1.DeleteOptions) er // DeleteCollection deletes a collection of objects. func (c *clusterTestTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clustertesttypes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go index e0b0376b84f53..92d87b12fbab2 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go @@ -124,7 +124,7 @@ func (c *FakeClusterTestTypes) DeleteCollection(options *v1.DeleteOptions, listO // Patch applies the patch and returns the patched clusterTestType. func (c *FakeClusterTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clustertesttypesResource, name, data, subresources...), &examplev1.ClusterTestType{}) + Invokes(testing.NewRootPatchSubresourceAction(clustertesttypesResource, name, pt, data, subresources...), &examplev1.ClusterTestType{}) if obj == nil { return nil, err } @@ -134,7 +134,7 @@ func (c *FakeClusterTestTypes) Patch(name string, pt types.PatchType, data []byt // GetScale takes name of the clusterTestType, and returns the corresponding scale object, and an error if there is any. func (c *FakeClusterTestTypes) GetScale(clusterTestTypeName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetSubresourceAction(clustertesttypesResource, clusterTestTypeName), &autoscaling.Scale{}) + Invokes(testing.NewRootGetSubresourceAction(clustertesttypesResource, "scale", clusterTestTypeName), &autoscaling.Scale{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go index b284a57978541..3ef9885d2c9d4 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.TestType, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &examplev1.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &examplev1.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go index 6fafb1e1b1521..164b0510e47de 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.Test // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err // Watch returns a watch.Interface that watches the requested testTypes. 
func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go index 7c37bd8163e10..c4efc6597d33c 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example2v1.TestType, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &example2v1.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example2v1.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go index 69b0e64c5281a..2e3194e00abf9 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.Test // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err // Watch returns a watch.Interface that watches the requested testTypes. 
func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go index 02e0d6ac351ad..86d63f376f45f 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -27,6 +27,7 @@ import ( versioned "k8s.io/code-generator/_examples/crd/clientset/versioned" ) +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -35,4 +36,5 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. 
type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/client-gen/BUILD.bazel index a60c143423be8..8acf3616983d4 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/client-gen/BUILD.bazel @@ -7,12 +7,12 @@ go_library( importpath = "k8s.io/code-generator/cmd/client-gen", visibility = ["//visibility:private"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/generators:go_default_library", "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/client-gen/generators/BUILD.bazel index 9d0b165cec4b1..8503682f969e1 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/BUILD.bazel @@ -14,7 +14,6 @@ go_library( importpath = "k8s.io/code-generator/cmd/client-gen/generators", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/generators/fake:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme:go_default_library", @@ -25,5 +24,6 @@ go_library( "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index 335e995c0a2cf..ee6ebbcf0937a 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -32,7 +32,7 @@ import ( "k8s.io/gengo/namer" "k8s.io/gengo/types" - "github.com/golang/glog" + "k8s.io/klog" ) // NameSystems returns the name system used by the generators in this package. 
@@ -130,7 +130,7 @@ func DefaultNameSystem() string { } func packageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, apiPath string, srcTreePath string, inputPackage string, boilerplate []byte) generator.Package { - groupVersionClientPackage := strings.ToLower(filepath.Join(clientsetPackage, "typed", groupPackageName, gv.Version.NonEmpty())) + groupVersionClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) return &generator.DefaultPackage{ PackageName: strings.ToLower(gv.Version.NonEmpty()), PackagePath: groupVersionClientPackage, @@ -318,12 +318,12 @@ func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.Cust func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) + klog.Fatalf("Failed loading boilerplate: %v", err) } customArgs, ok := arguments.CustomArgs.(*clientgenargs.CustomArgs) if !ok { - glog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs") + klog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs") } includedTypesOverrides := customArgs.IncludedTypesOverrides diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go index ec439c2f7ae45..4b3854be6e1bb 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go @@ -30,9 +30,9 @@ import ( ) func PackageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, inputPackage string, boilerplate []byte) generator.Package { - outputPackage := strings.ToLower(filepath.Join(clientsetPackage, "typed", groupPackageName, gv.Version.NonEmpty(), "fake")) + outputPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty()), "fake") // TODO: should make this a function, called by here and in client-generator.go - realClientPackage := filepath.Join(clientsetPackage, "typed", groupPackageName, gv.Version.NonEmpty()) + realClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) return &generator.DefaultPackage{ PackageName: "fake", PackagePath: outputPackage, diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go index f1225acb3d7d0..61b3334f4018e 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -60,12 +60,12 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) 
for _, group := range g.groups { for _, version := range group.Versions { - groupClientPackage := filepath.Join(g.fakeClientsetPackage, "typed", group.PackageName, version.NonEmpty()) + groupClientPackage := filepath.Join(g.fakeClientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) fakeGroupClientPackage := filepath.Join(groupClientPackage, "fake") groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) - imports = append(imports, strings.ToLower(fmt.Sprintf("%s%s \"%s\"", groupAlias, version.NonEmpty(), groupClientPackage))) - imports = append(imports, strings.ToLower(fmt.Sprintf("fake%s%s \"%s\"", groupAlias, version.NonEmpty(), fakeGroupClientPackage))) + imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), groupClientPackage)) + imports = append(imports, fmt.Sprintf("fake%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), fakeGroupClientPackage)) } } // the package that has the clientset Interface diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go index 675fa5f6f7991..8f4d5785ef94d 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go @@ -64,7 +64,7 @@ func (g *genFakeForGroup) Namers(c *generator.Context) namer.NameSystems { func (g *genFakeForGroup) Imports(c *generator.Context) (imports []string) { imports = g.imports.ImportLines() if len(g.types) != 0 { - imports = append(imports, strings.ToLower(fmt.Sprintf("%s \"%s\"", filepath.Base(g.realClientPackage), g.realClientPackage))) + imports = append(imports, fmt.Sprintf("%s \"%s\"", strings.ToLower(filepath.Base(g.realClientPackage)), g.realClientPackage)) } return imports } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index bf18c14c6428f..f5888aef15d86 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -362,7 +362,7 @@ var getSubresourceTemplate = ` func (c *Fake$.type|publicPlural$) Get($.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { obj, err := c.Fake. $if .namespaced$Invokes($.NewGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${}) - $else$Invokes($.NewRootGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name), &$.resultType|raw${})$end$ + $else$Invokes($.NewRootGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${})$end$ if obj == nil { return nil, err } @@ -469,8 +469,8 @@ var patchTemplate = ` // Patch applies the patch and returns the patched $.resultType|private$. func (c *Fake$.type|publicPlural$) Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error) { obj, err := c.Fake. - $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, name, data, subresources... 
), &$.resultType|raw${}) - $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, name, data, subresources...), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, name, pt, data, subresources... ), &$.resultType|raw${}) + $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, name, pt, data, subresources...), &$.resultType|raw${})$end$ if obj == nil { return nil, err } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go index 18ec09ac6fb1d..6fdb29a94ac00 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -58,9 +58,9 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) for _, group := range g.groups { for _, version := range group.Versions { - typedClientPath := filepath.Join(g.clientsetPackage, "typed", group.PackageName, version.NonEmpty()) + typedClientPath := filepath.Join(g.clientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) - imports = append(imports, strings.ToLower(fmt.Sprintf("%s%s \"%s\"", groupAlias, version.NonEmpty(), typedClientPath))) + imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), typedClientPath)) } } return diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go index 92e2a97f1ded1..3e8fc7c4c6882 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go @@ -387,11 +387,16 @@ func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privateP var listTemplate = ` // List takes label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. func (c *$.type|privatePlural$) List(opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil{ + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &$.resultType|raw$List{} err = c.client.Get(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). + Timeout(timeout). Do(). Into(result) return @@ -401,6 +406,10 @@ func (c *$.type|privatePlural$) List(opts $.ListOptions|raw$) (result *$.resultT var listSubresourceTemplate = ` // List takes $.type|raw$ name, label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. func (c *$.type|privatePlural$) List($.type|private$Name string, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil{ + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &$.resultType|raw$List{} err = c.client.Get(). 
$if .namespaced$Namespace(c.ns).$end$ @@ -408,6 +417,7 @@ func (c *$.type|privatePlural$) List($.type|private$Name string, opts $.ListOpti Name($.type|private$Name). SubResource("$.subresourcePath$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). + Timeout(timeout). Do(). Into(result) return @@ -461,10 +471,15 @@ func (c *$.type|privatePlural$) Delete(name string, options *$.DeleteOptions|raw var deleteCollectionTemplate = ` // DeleteCollection deletes a collection of objects. func (c *$.type|privatePlural$) DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil{ + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). VersionedParams(&listOptions, $.schemeParameterCodec|raw$). + Timeout(timeout). Body(options). Do(). Error() @@ -553,11 +568,16 @@ func (c *$.type|privatePlural$) UpdateStatus($.type|private$ *$.type|raw$) (resu var watchTemplate = ` // Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. func (c *$.type|privatePlural$) Watch(opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil{ + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). + Timeout(timeout). Watch() } ` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go index 60cfbcc0f5089..a698a28b68195 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go @@ -69,10 +69,11 @@ func (g *GenScheme) Imports(c *generator.Context) (imports []string) { packagePath = filepath.Dir(packagePath) } packagePath = filepath.Join(packagePath, "install") - imports = append(imports, strings.ToLower(fmt.Sprintf("%s \"%s\"", groupAlias, path.Vendorless(packagePath)))) + + imports = append(imports, fmt.Sprintf("%s \"%s\"", groupAlias, path.Vendorless(packagePath))) break } else { - imports = append(imports, strings.ToLower(fmt.Sprintf("%s%s \"%s\"", groupAlias, version.Version.NonEmpty(), path.Vendorless(packagePath)))) + imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.Version.NonEmpty()), path.Vendorless(packagePath))) } } } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/main.go b/vendor/k8s.io/code-generator/cmd/client-gen/main.go index 22c28e35f8f0b..6e0d187f5cb67 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/main.go @@ -21,9 +21,9 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/client-gen/args" "k8s.io/code-generator/cmd/client-gen/generators" @@ -31,6 +31,7 @@ import ( ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. 
@@ -52,7 +53,7 @@ func main() { } if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } if err := genericArgs.Execute( @@ -60,6 +61,6 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } } diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/conversion-gen/BUILD.bazel index 170e1159874cf..e524e91a26961 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/BUILD.bazel @@ -7,12 +7,12 @@ go_library( importpath = "k8s.io/code-generator/cmd/conversion-gen", visibility = ["//visibility:private"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/conversion-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/conversion-gen/generators:go_default_library", "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD.bazel index 6e55f3b9a9c3e..099db8dad7508 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD.bazel @@ -7,11 +7,11 @@ go_library( importpath = "k8s.io/code-generator/cmd/conversion-gen/generators", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/code-generator/cmd/conversion-gen/args:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go index 422237e11772e..775972d12318a 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go @@ -29,7 +29,7 @@ import ( "k8s.io/gengo/namer" "k8s.io/gengo/types" - "github.com/golang/glog" + "k8s.io/klog" conversionargs "k8s.io/code-generator/cmd/conversion-gen/args" ) @@ -124,10 +124,10 @@ type conversionFuncMap map[conversionPair]*types.Type // Returns all manually-defined conversion functions in the package. 
func getManualConversionFunctions(context *generator.Context, pkg *types.Package, manualMap conversionFuncMap) { if pkg == nil { - glog.Warningf("Skipping nil package passed to getManualConversionFunctions") + klog.Warningf("Skipping nil package passed to getManualConversionFunctions") return } - glog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name) + klog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name) scopeName := types.Ref(conversionPackagePath, "Scope").Name errorName := types.Ref("", "error").Name @@ -136,34 +136,34 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package for _, f := range pkg.Functions { if f.Underlying == nil || f.Underlying.Kind != types.Func { - glog.Errorf("Malformed function: %#v", f) + klog.Errorf("Malformed function: %#v", f) continue } if f.Underlying.Signature == nil { - glog.Errorf("Function without signature: %#v", f) + klog.Errorf("Function without signature: %#v", f) continue } - glog.V(8).Infof("Considering function %s", f.Name) + klog.V(8).Infof("Considering function %s", f.Name) signature := f.Underlying.Signature // Check whether the function is conversion function. // Note that all of them have signature: // func Convert_inType_To_outType(inType, outType, conversion.Scope) error if signature.Receiver != nil { - glog.V(8).Infof("%s has a receiver", f.Name) + klog.V(8).Infof("%s has a receiver", f.Name) continue } if len(signature.Parameters) != 3 || signature.Parameters[2].Name != scopeName { - glog.V(8).Infof("%s has wrong parameters", f.Name) + klog.V(8).Infof("%s has wrong parameters", f.Name) continue } if len(signature.Results) != 1 || signature.Results[0].Name != errorName { - glog.V(8).Infof("%s has wrong results", f.Name) + klog.V(8).Infof("%s has wrong results", f.Name) continue } inType := signature.Parameters[0] outType := signature.Parameters[1] if inType.Kind != types.Pointer || outType.Kind != types.Pointer { - glog.V(8).Infof("%s has wrong parameter types", f.Name) + klog.V(8).Infof("%s has wrong parameter types", f.Name) continue } // Now check if the name satisfies the convention. @@ -171,7 +171,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package args := argsFromType(inType.Elem, outType.Elem) sw.Do("Convert_$.inType|public$_To_$.outType|public$", args) if f.Name.Name == buffer.String() { - glog.V(4).Infof("Found conversion function %s", f.Name) + klog.V(4).Infof("Found conversion function %s", f.Name) key := conversionPair{inType.Elem, outType.Elem} // We might scan the same package twice, and that's OK. 
if v, ok := manualMap[key]; ok && v != nil && v.Name.Package != pkg.Path { @@ -181,9 +181,9 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package } else { // prevent user error when they don't get the correct conversion signature if strings.HasPrefix(f.Name.Name, "Convert_") { - glog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String()) + klog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String()) } - glog.V(8).Infof("%s has wrong name", f.Name) + klog.V(8).Infof("%s has wrong name", f.Name) } buffer.Reset() } @@ -192,7 +192,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) + klog.Fatalf("Failed loading boilerplate: %v", err) } packages := generator.Packages{} @@ -220,7 +220,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat } processed[i] = true - glog.V(5).Infof("considering pkg %q", i) + klog.V(5).Infof("considering pkg %q", i) pkg := context.Universe[i] // typesPkg is where the versioned types are defined. Sometimes it is // different from pkg. For example, kubernetes core/v1 types are defined @@ -239,9 +239,9 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // in their doc.go file. peerPkgs := extractTag(pkg.Comments) if peerPkgs != nil { - glog.V(5).Infof(" tags: %q", peerPkgs) + klog.V(5).Infof(" tags: %q", peerPkgs) } else { - glog.V(5).Infof(" no tag") + klog.V(5).Infof(" no tag") continue } skipUnsafe := false @@ -255,14 +255,14 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat externalTypesValues := extractExternalTypesTag(pkg.Comments) if externalTypesValues != nil { if len(externalTypesValues) != 1 { - glog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues) + klog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues) } externalTypes := externalTypesValues[0] - glog.V(5).Infof(" external types tags: %q", externalTypes) + klog.V(5).Infof(" external types tags: %q", externalTypes) var err error typesPkg, err = context.AddDirectory(externalTypes) if err != nil { - glog.Fatalf("cannot import package %s", externalTypes) + klog.Fatalf("cannot import package %s", externalTypes) } // update context.Order to the latest context.Universe orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} @@ -291,7 +291,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat context.AddDir(pp) p := context.Universe[pp] if nil == p { - glog.Fatalf("failed to find pkg: %s", pp) + klog.Fatalf("failed to find pkg: %s", pp) } getManualConversionFunctions(context, p, manualConversions) } @@ -335,7 +335,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // from being a candidate for unsafe conversion for k, v := range manualConversions { if isCopyOnly(v.CommentLines) { - glog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) + klog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) continue } // this type should be excluded from all equivalence, because the 
converter must be called. @@ -518,9 +518,9 @@ func (g *genConversion) convertibleOnlyWithinPackage(inType, outType *types.Type tagvals := extractTag(t.CommentLines) if tagvals != nil { if tagvals[0] != "false" { - glog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0]) + klog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0]) } - glog.V(5).Infof("type %v requests no conversion generation, skipping", t) + klog.V(5).Infof("type %v requests no conversion generation, skipping", t) return false } // TODO: Consider generating functions for other kinds too. @@ -582,10 +582,10 @@ func (g *genConversion) preexists(inType, outType *types.Type) (*types.Type, boo } func (g *genConversion) Init(c *generator.Context, w io.Writer) error { - if glog.V(5) { + if klog.V(5) { if m, ok := g.useUnsafe.(equalMemoryTypes); ok { var result []string - glog.Infof("All objects without identical memory layout:") + klog.Infof("All objects without identical memory layout:") for k, v := range m { if v { continue @@ -594,7 +594,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error { } sort.Strings(result) for _, s := range result { - glog.Infof(s) + klog.Infof(s) } } } @@ -643,7 +643,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error { } func (g *genConversion) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { - glog.V(5).Infof("generating for type %v", t) + klog.V(5).Infof("generating for type %v", t) peerType := getPeerTypeFor(c, t, g.peerPackages) sw := generator.NewSnippetWriter(w, c, "$", "$") g.generateConversion(t, peerType, sw) @@ -664,10 +664,10 @@ func (g *genConversion) generateConversion(inType, outType *types.Type, sw *gene // There is a public manual Conversion method: use it. } else if skipped := g.skippedFields[inType]; len(skipped) != 0 { // The inType had some fields we could not generate. - glog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType) - glog.Errorf(" the following fields need manual conversion:") + klog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType) + klog.Errorf(" the following fields need manual conversion:") for _, f := range skipped { - glog.Errorf(" - %v", f) + klog.Errorf(" - %v", f) } } else { // Emit a public conversion function. @@ -682,7 +682,7 @@ func (g *genConversion) generateConversion(inType, outType *types.Type, sw *gene // at any nesting level. This makes the autogenerator easy to understand, and // the compiler shouldn't care. func (g *genConversion) generateFor(inType, outType *types.Type, sw *generator.SnippetWriter) { - glog.V(5).Infof("generating %v -> %v", inType, outType) + klog.V(5).Infof("generating %v -> %v", inType, outType) var f func(*types.Type, *types.Type, *generator.SnippetWriter) switch inType.Kind { @@ -853,7 +853,7 @@ func (g *genConversion) doStruct(inType, outType *types.Type, sw *generator.Snip sw.Do("}\n", nil) continue } - glog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) + klog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) } // If we can't auto-convert, punt before we emit any code. 
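Editor's note: the conversion-gen hunks above and the main.go hunks below apply the same mechanical logging migration that runs through this patch: the github.com/golang/glog import is swapped for k8s.io/klog, call sites keep their names (Fatalf, Errorf, V(n).Infof), and each generator's main now calls klog.InitFlags(nil) before flag parsing so klog's command-line flags are registered. A minimal, self-contained sketch of that pattern follows; the run helper is hypothetical and stands in for the generator's Execute call, it is not part of the patch.

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog does not register its flags on the global flag set by itself,
	// so each generator calls InitFlags before parsing, as in the hunks below.
	klog.InitFlags(nil)
	flag.Parse()

	if err := run(); err != nil {
		// Drop-in replacement for glog.Fatalf with the same format semantics.
		klog.Fatalf("Error: %v", err)
	}
	// Verbosity-gated logging keeps the same V(level) API as glog.
	klog.V(2).Info("Completed successfully.")
}

// run is a placeholder for the generator's actual work (e.g. genericArgs.Execute).
func run() error { return nil }

The same swap is visible verbatim in the client-gen, deepcopy-gen, defaulter-gen, and informer-gen main.go hunks elsewhere in this patch.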
diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go index f2b91cc2e29ad..698baa7db7a7f 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -38,9 +38,9 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/conversion-gen/args" "k8s.io/code-generator/cmd/conversion-gen/generators" @@ -48,6 +48,7 @@ import ( ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -61,7 +62,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } // Run it. @@ -70,7 +71,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/BUILD.bazel index 5034c835c7dc3..89c78f4ab62fa 100644 --- a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/BUILD.bazel @@ -7,12 +7,12 @@ go_library( importpath = "k8s.io/code-generator/cmd/deepcopy-gen", visibility = ["//visibility:private"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/deepcopy-gen/args:go_default_library", "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/deepcopy-gen/generators:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go index cce65b772f810..96fb298734150 100644 --- a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -46,16 +46,17 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" "k8s.io/gengo/examples/deepcopy-gen/generators" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args" "k8s.io/code-generator/pkg/util" ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -69,7 +70,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } // Run it. 
@@ -78,7 +79,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/defaulter-gen/BUILD.bazel index 70e17f33d061a..7263859dd340a 100644 --- a/vendor/k8s.io/code-generator/cmd/defaulter-gen/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/BUILD.bazel @@ -7,12 +7,12 @@ go_library( importpath = "k8s.io/code-generator/cmd/defaulter-gen", visibility = ["//visibility:private"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/defaulter-gen/args:go_default_library", "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/defaulter-gen/generators:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go index 9d33f700b33c5..40bb875e52a4f 100644 --- a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -45,16 +45,17 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" "k8s.io/gengo/examples/defaulter-gen/generators" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/defaulter-gen/args" "k8s.io/code-generator/pkg/util" ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -68,7 +69,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } // Run it. 
@@ -77,7 +78,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/informer-gen/BUILD.bazel index 959765c267759..53c35943f44a1 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/BUILD.bazel @@ -7,12 +7,12 @@ go_library( importpath = "k8s.io/code-generator/cmd/informer-gen", visibility = ["//visibility:private"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/informer-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/informer-gen/generators:go_default_library", "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/BUILD.bazel index fe0915f302c2a..dc4c79ee24a2e 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/BUILD.bazel @@ -17,7 +17,6 @@ go_library( importpath = "k8s.io/code-generator/cmd/informer-gen/generators", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/generators/util:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/types:go_default_library", "//vendor/k8s.io/code-generator/cmd/informer-gen/args:go_default_library", @@ -25,5 +24,6 @@ go_library( "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go index 5c557db7393f6..62ae109a4a24d 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go @@ -25,7 +25,7 @@ import ( "k8s.io/gengo/namer" "k8s.io/gengo/types" - "github.com/golang/glog" + "k8s.io/klog" ) // factoryGenerator produces a file of listers for a given GroupVersion and @@ -65,7 +65,7 @@ func (g *factoryGenerator) Imports(c *generator.Context) (imports []string) { func (g *factoryGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "{{", "}}") - glog.V(5).Infof("processing type %v", t) + klog.V(5).Infof("processing type %v", t) gvInterfaces := make(map[string]*types.Type) gvNewFuncs := make(map[string]*types.Type) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go index c781804387062..fc0668c5bed29 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go @@ -23,7 +23,7 @@ import ( "k8s.io/gengo/namer" 
"k8s.io/gengo/types" - "github.com/golang/glog" + "k8s.io/klog" ) // factoryInterfaceGenerator produces a file of interfaces used to break a dependency cycle for @@ -60,7 +60,7 @@ func (g *factoryInterfaceGenerator) Imports(c *generator.Context) (imports []str func (g *factoryInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "{{", "}}") - glog.V(5).Infof("processing type %v", t) + klog.V(5).Infof("processing type %v", t) m := map[string]interface{}{ "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), @@ -76,6 +76,7 @@ func (g *factoryInterfaceGenerator) GenerateType(c *generator.Context, t *types. } var externalSharedInformerFactoryInterface = ` +// NewInformerFunc takes {{.clientSetPackage|raw}} and {{.timeDuration|raw}} to return a SharedIndexInformer. type NewInformerFunc func({{.clientSetPackage|raw}}, {{.timeDuration|raw}}) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -84,5 +85,6 @@ type SharedInformerFactory interface { InformerFor(obj {{.runtimeObject|raw}}, newFunc NewInformerFunc) {{.cacheSharedIndexInformer|raw}} } +// TweakListOptionsFunc is a function that transforms a {{.v1ListOptions|raw}}. type TweakListOptionsFunc func(*{{.v1ListOptions|raw}}) ` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go index 88cc08df52fcb..9204d6215ac8b 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go @@ -28,7 +28,7 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "github.com/golang/glog" + "k8s.io/klog" ) // informerGenerator produces a file of listers for a given GroupVersion and @@ -66,7 +66,7 @@ func (g *informerGenerator) Imports(c *generator.Context) (imports []string) { func (g *informerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "$", "$") - glog.V(5).Infof("processing type %v", t) + klog.V(5).Infof("processing type %v", t) listerPackage := fmt.Sprintf("%s/%s/%s", g.listersPackage, g.groupPkgName, strings.ToLower(g.groupVersion.Version.NonEmpty())) clientSetInterface := c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go index 2cc0372f89eee..cfb91cebac639 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go @@ -22,11 +22,11 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" + "k8s.io/klog" "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" @@ -102,12 +102,12 @@ func vendorless(p string) string { func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) + klog.Fatalf("Failed loading boilerplate: %v", err) } customArgs, ok := 
arguments.CustomArgs.(*informergenargs.CustomArgs) if !ok { - glog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) + klog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) } internalVersionPackagePath := filepath.Join(arguments.OutputPackagePath) @@ -128,7 +128,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat objectMeta, internal, err := objectMetaForPackage(p) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } if objectMeta == nil { // no types in this package had genclient @@ -141,7 +141,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat if internal { lastSlash := strings.LastIndex(p.Path, "/") if lastSlash == -1 { - glog.Fatalf("error constructing internal group version for package %q", p.Path) + klog.Fatalf("error constructing internal group version for package %q", p.Path) } gv.Group = clientgentypes.Group(p.Path[lastSlash+1:]) targetGroupVersions = internalGroupVersions @@ -320,9 +320,9 @@ func versionPackage(basePackage string, groupPkgName string, gv clientgentypes.G DefaultGen: generator.DefaultGen{ OptionalName: "interface", }, - outputPackage: packagePath, - imports: generator.NewImportTracker(), - types: typesToGenerate, + outputPackage: packagePath, + imports: generator.NewImportTracker(), + types: typesToGenerate, internalInterfacesPackage: packageForInternalInterfaces(basePackage), }) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/tags.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/tags.go index afa287815204a..d25d5b6304905 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/tags.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/tags.go @@ -17,8 +17,8 @@ limitations under the License. package generators import ( - "github.com/golang/glog" "k8s.io/gengo/types" + "k8s.io/klog" ) // extractBoolTagOrDie gets the comment-tags for the key and asserts that, if @@ -27,7 +27,7 @@ import ( func extractBoolTagOrDie(key string, lines []string) bool { val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return val } diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go index 1cd27d5cddde0..f80350c5f6a76 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go @@ -63,7 +63,7 @@ func (g *versionInterfaceGenerator) GenerateType(c *generator.Context, t *types. 
m := map[string]interface{}{ "interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), "interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}), - "types": g.types, + "types": g.types, } sw.Do(versionTemplate, m) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/main.go b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go index bfe826080cc99..14f3e923e6cc0 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go @@ -20,16 +20,17 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/code-generator/cmd/informer-gen/generators" "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/informer-gen/args" ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -47,7 +48,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } // Run it. @@ -56,7 +57,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/lister-gen/BUILD.bazel index fbc7ad0588585..3122ab766c459 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/BUILD.bazel @@ -7,12 +7,12 @@ go_library( importpath = "k8s.io/code-generator/cmd/lister-gen", visibility = ["//visibility:private"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/lister-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/lister-gen/generators:go_default_library", "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/BUILD.bazel index 4fb93d28641e5..0759e8eadda7b 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/BUILD.bazel @@ -11,12 +11,12 @@ go_library( importpath = "k8s.io/code-generator/cmd/lister-gen/generators", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/generators/util:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/types:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go index cde6e2f770a9c..c8ed5ad4d3be6 100644 --- 
a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go @@ -30,7 +30,7 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "github.com/golang/glog" + "k8s.io/klog" ) // NameSystems returns the name system used by the generators in this package. @@ -66,7 +66,7 @@ func DefaultNameSystem() string { func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) + klog.Fatalf("Failed loading boilerplate: %v", err) } var packageList generator.Packages @@ -75,7 +75,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat objectMeta, internal, err := objectMetaForPackage(p) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } if objectMeta == nil { // no types in this package had genclient @@ -88,7 +88,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat if internal { lastSlash := strings.LastIndex(p.Path, "/") if lastSlash == -1 { - glog.Fatalf("error constructing internal group version for package %q", p.Path) + klog.Fatalf("error constructing internal group version for package %q", p.Path) } gv.Group = clientgentypes.Group(p.Path[lastSlash+1:]) internalGVPkg = p.Path @@ -223,7 +223,7 @@ func (g *listerGenerator) Imports(c *generator.Context) (imports []string) { func (g *listerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "$", "$") - glog.V(5).Infof("processing type %v", t) + klog.V(5).Infof("processing type %v", t) m := map[string]interface{}{ "Resource": c.Universe.Function(types.Name{Package: t.Name.Package, Name: "Resource"}), "type": t, diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go index afa287815204a..d25d5b6304905 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go @@ -17,8 +17,8 @@ limitations under the License. package generators import ( - "github.com/golang/glog" "k8s.io/gengo/types" + "k8s.io/klog" ) // extractBoolTagOrDie gets the comment-tags for the key and asserts that, if @@ -27,7 +27,7 @@ import ( func extractBoolTagOrDie(key string, lines []string) bool { val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return val } diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/main.go b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go index d5ff8e46ee035..aca16b2bda39e 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go @@ -20,16 +20,17 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/code-generator/cmd/lister-gen/generators" "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/lister-gen/args" ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. 
@@ -44,7 +45,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } // Run it. @@ -53,7 +54,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/README b/vendor/k8s.io/code-generator/cmd/openapi-gen/README deleted file mode 100644 index e6dcc85d0d6aa..0000000000000 --- a/vendor/k8s.io/code-generator/cmd/openapi-gen/README +++ /dev/null @@ -1,13 +0,0 @@ -# Generate OpenAPI definitions - -- To generate definition for a specific type or package add "+k8s:openapi-gen=true" tag to the type/package comment lines. -- To exclude a type or a member from a tagged package/type, add "+k8s:openapi-gen=false" tag to the comment lines. - -# OpenAPI Extensions -OpenAPI spec can have extensions on types. To define one or more extensions on a type or its member -add "+k8s:openapi-gen=x-kubernetes-$NAME:$VALUE" to the comment lines before type/member. A type/member can -have multiple extensions. The rest of the line in the comment will be used as $VALUE so there is no need to -escape or quote the value string. Extensions can be use to pass more information to client generators or -documentation generators. For example a type my have a friendly name to be displayed in documentation or -being used in a client's fluent interface. - diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/args/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/openapi-gen/args/BUILD.bazel deleted file mode 100644 index 892a31bc67519..0000000000000 --- a/vendor/k8s.io/code-generator/cmd/openapi-gen/args/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["args.go"], - importmap = "k8s.io/kops/vendor/k8s.io/code-generator/cmd/openapi-gen/args", - importpath = "k8s.io/code-generator/cmd/openapi-gen/args", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/gengo/args:go_default_library", - ], -) diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/openapi-gen/args/args.go deleted file mode 100644 index f9bb17e1a56b6..0000000000000 --- a/vendor/k8s.io/code-generator/cmd/openapi-gen/args/args.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package args - -import ( - "fmt" - - "github.com/spf13/pflag" - "k8s.io/gengo/args" -) - -// CustomArgs is used by the gengo framework to pass args specific to this generator. -type CustomArgs struct{} - -// NewDefaults returns default arguments for the generator. 
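The removed openapi-gen README above documents the comment tags that generator consumes; as a rough illustration of how such tags attach to Go types (the package, type, field, and extension names here are invented, not taken from this repository):

```go
// Package v1 is a made-up API package used only to illustrate the tags
// described in the removed README above.
// +k8s:openapi-gen=true
package v1

// Widget is a hypothetical type; the extension tag below attaches an
// x-kubernetes-* extension to its generated OpenAPI definition, with the
// rest of the line taken verbatim as the value.
// +k8s:openapi-gen=x-kubernetes-my-extension:my value
type Widget struct {
	// Spec is included in the generated definition by default,
	// because the enclosing package is tagged with openapi-gen=true.
	Spec string

	// internalState is excluded from the generated definition.
	// +k8s:openapi-gen=false
	internalState string
}
```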
-func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { - genericArgs := args.Default().WithoutDefaultFlagParsing() - customArgs := &CustomArgs{} - genericArgs.CustomArgs = customArgs - genericArgs.OutputFileBaseName = "openapi_generated" - return genericArgs, customArgs -} - -// AddFlags add the generator flags to the flag set. -func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {} - -// Validate checks the given arguments. -func Validate(genericArgs *args.GeneratorArgs) error { - _ = genericArgs.CustomArgs.(*CustomArgs) - - if len(genericArgs.OutputFileBaseName) == 0 { - return fmt.Errorf("output file base name cannot be empty") - } - if len(genericArgs.OutputPackagePath) == 0 { - return fmt.Errorf("output package cannot be empty") - } - - return nil -} diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/register-gen/BUILD.bazel index 78af8f5f7190b..667d76b166df6 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/register-gen/BUILD.bazel @@ -7,12 +7,12 @@ go_library( importpath = "k8s.io/code-generator/cmd/register-gen", visibility = ["//visibility:private"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/register-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/register-gen/generators:go_default_library", "//vendor/k8s.io/code-generator/pkg/util:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/BUILD.bazel b/vendor/k8s.io/code-generator/cmd/register-gen/generators/BUILD.bazel index 41133a4d7e447..3698b13979232 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/generators/BUILD.bazel +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/BUILD.bazel @@ -10,11 +10,11 @@ go_library( importpath = "k8s.io/code-generator/cmd/register-gen/generators", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/types:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go index ca13ca85798c7..5186e421f2ea1 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go @@ -22,7 +22,7 @@ import ( "path" "strings" - "github.com/golang/glog" + "k8s.io/klog" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" "k8s.io/gengo/args" @@ -46,7 +46,7 @@ func DefaultNameSystem() string { func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) + klog.Fatalf("Failed loading boilerplate: %v", err) } packages := generator.Packages{} @@ -54,27 +54,27 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat pkg := context.Universe.Package(inputDir) internal, err 
:= isInternal(pkg) if err != nil { - glog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err) + klog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err) continue } if internal { - glog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name) + klog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name) continue } registerFileName := "register.go" searchPath := path.Join(args.DefaultSourceTree(), inputDir, registerFileName) if _, err := os.Stat(path.Join(searchPath)); err == nil { - glog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath) + klog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath) continue } else if err != nil && !os.IsNotExist(err) { - glog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName) + klog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName) } gv := clientgentypes.GroupVersion{} { pathParts := strings.Split(pkg.Path, "/") if len(pathParts) < 2 { - glog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path) + klog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path) continue } gv.Group = clientgentypes.Group(pathParts[len(pathParts)-2]) @@ -84,14 +84,14 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // extract the fully qualified API group name from it and overwrite the group inferred from the package path if override := types.ExtractCommentTags("+", pkg.DocComments)["groupName"]; override != nil { groupName := override[0] - glog.V(5).Infof("overriding the group name with = %s", groupName) + klog.V(5).Infof("overriding the group name with = %s", groupName) gv.Group = clientgentypes.Group(groupName) } } typesToRegister := []*types.Type{} for _, t := range pkg.Types { - glog.V(5).Infof("considering type = %s", t.Name.String()) + klog.V(5).Infof("considering type = %s", t.Name.String()) for _, typeMember := range t.Members { if typeMember.Name == "TypeMeta" && typeMember.Embedded == true { typesToRegister = append(typesToRegister, t) diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/main.go b/vendor/k8s.io/code-generator/cmd/register-gen/main.go index db02a4af4b5a5..30a175d8d6201 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/main.go @@ -20,8 +20,8 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/register-gen/args" "k8s.io/code-generator/cmd/register-gen/generators" @@ -30,6 +30,7 @@ import ( ) func main() { + klog.InitFlags(nil) genericArgs := generatorargs.NewDefaults() genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) genericArgs.AddFlags(pflag.CommandLine) @@ -38,7 +39,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + 
klog.Fatalf("Error: %v", err) } if err := genericArgs.Execute( @@ -46,7 +47,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh index d7ad5b2e07f8a..d8531a8d9d7cb 100755 --- a/vendor/k8s.io/code-generator/generate-groups.sh +++ b/vendor/k8s.io/code-generator/generate-groups.sh @@ -50,7 +50,7 @@ shift 4 # To support running this script from anywhere, we have to first cd into this directory # so we can install the tools. cd $(dirname "${0}") - go install ./cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} + go install ${GOFLAGS:-} ./cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} ) function codegen::join() { local IFS="$1"; shift; echo "$*"; } @@ -72,8 +72,8 @@ if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then fi if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then - echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/clientset" - ${GOPATH}/bin/client-gen --clientset-name ${CLIENTSET_NAME_VERSIONED:-versioned} --input-base "" --input $(codegen::join , "${FQ_APIS[@]}") --output-package ${OUTPUT_PKG}/clientset "$@" + echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" + ${GOPATH}/bin/client-gen --clientset-name ${CLIENTSET_NAME_VERSIONED:-versioned} --input-base "" --input $(codegen::join , "${FQ_APIS[@]}") --output-package ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset} "$@" fi if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then @@ -85,7 +85,7 @@ if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers" ${GOPATH}/bin/informer-gen \ --input-dirs $(codegen::join , "${FQ_APIS[@]}") \ - --versioned-clientset-package ${OUTPUT_PKG}/clientset/${CLIENTSET_NAME_VERSIONED:-versioned} \ + --versioned-clientset-package ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned} \ --listers-package ${OUTPUT_PKG}/listers \ --output-package ${OUTPUT_PKG}/informers \ "$@" diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh index 1220e77c587d4..6849ed94373c7 100755 --- a/vendor/k8s.io/code-generator/generate-internal-groups.sh +++ b/vendor/k8s.io/code-generator/generate-internal-groups.sh @@ -47,7 +47,7 @@ EXT_APIS_PKG="$4" GROUPS_WITH_VERSIONS="$5" shift 5 -go install ./$(dirname "${0}")/cmd/{defaulter-gen,conversion-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} +go install ${GOFLAGS:-} ./$(dirname "${0}")/cmd/{defaulter-gen,conversion-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} function codegen::join() { local IFS="$1"; shift; echo "$*"; } # enumerate group versions @@ -85,11 +85,11 @@ if [ "${GENS}" = "all" ] || grep -qw "conversion" <<<"${GENS}"; then fi if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then - echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/clientset" + echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" if [ -n "${INT_APIS_PKG}" ]; then - ${GOPATH}/bin/client-gen --clientset-name ${CLIENTSET_NAME_INTERNAL:-internalversion} --input-base "" --input 
$(codegen::join , $(printf '%s/ ' "${INT_FQ_APIS[@]}")) --output-package ${OUTPUT_PKG}/clientset "$@" + ${GOPATH}/bin/client-gen --clientset-name ${CLIENTSET_NAME_INTERNAL:-internalversion} --input-base "" --input $(codegen::join , $(printf '%s/ ' "${INT_FQ_APIS[@]}")) --output-package ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset} "$@" fi - ${GOPATH}/bin/client-gen --clientset-name ${CLIENTSET_NAME_VERSIONED:-versioned} --input-base "" --input $(codegen::join , "${EXT_FQ_APIS[@]}") --output-package ${OUTPUT_PKG}/clientset "$@" + ${GOPATH}/bin/client-gen --clientset-name ${CLIENTSET_NAME_VERSIONED:-versioned} --input-base "" --input $(codegen::join , "${EXT_FQ_APIS[@]}") --output-package ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset} "$@" fi if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then @@ -101,8 +101,8 @@ if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers" ${GOPATH}/bin/informer-gen \ --input-dirs $(codegen::join , "${ALL_FQ_APIS[@]}") \ - --versioned-clientset-package ${OUTPUT_PKG}/clientset/${CLIENTSET_NAME_VERSIONED:-versioned} \ - --internal-clientset-package ${OUTPUT_PKG}/clientset/${CLIENTSET_NAME_INTERNAL:-internalversion} \ + --versioned-clientset-package ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned} \ + --internal-clientset-package ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_INTERNAL:-internalversion} \ --listers-package ${OUTPUT_PKG}/listers \ --output-package ${OUTPUT_PKG}/informers \ "$@" diff --git a/vendor/k8s.io/code-generator/hack/update-codegen.sh b/vendor/k8s.io/code-generator/hack/update-codegen.sh index 767c2ab36b5e5..2a14fe527713c 100755 --- a/vendor/k8s.io/code-generator/hack/update-codegen.sh +++ b/vendor/k8s.io/code-generator/hack/update-codegen.sh @@ -30,3 +30,7 @@ $(dirname ${BASH_SOURCE})/../generate-groups.sh all \ k8s.io/code-generator/_examples/crd k8s.io/code-generator/_examples/crd/apis \ "example:v1 example2:v1" \ --output-base "$(dirname ${BASH_SOURCE})/../../.." + $(dirname ${BASH_SOURCE})/../generate-groups.sh all \ + k8s.io/code-generator/_examples/MixedCase k8s.io/code-generator/_examples/MixedCase/apis \ + "example:v1" \ + --output-base "$(dirname ${BASH_SOURCE})/../../.." diff --git a/vendor/k8s.io/code-generator/hack/verify-codegen.sh b/vendor/k8s.io/code-generator/hack/verify-codegen.sh index 25302228dfa98..497f2a484cac2 100755 --- a/vendor/k8s.io/code-generator/hack/verify-codegen.sh +++ b/vendor/k8s.io/code-generator/hack/verify-codegen.sh @@ -50,4 +50,6 @@ fi # smoke test echo "Smoke testing _example by compiling..." -go build ${SCRIPT_ROOT}/_example/... \ No newline at end of file +go build ./${SCRIPT_ROOT}/_examples/crd/... +go build ./${SCRIPT_ROOT}/_examples/apiserver/... +go build ./${SCRIPT_ROOT}/_examples/MixedCase/... diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml new file mode 100644 index 0000000000000..0f508dae66ed7 --- /dev/null +++ b/vendor/k8s.io/klog/.travis.yml @@ -0,0 +1,15 @@ +language: go +go_import_path: k8s.io/klog +dist: xenial +go: + - 1.9.x + - 1.10.x + - 1.11.x +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - diff -u <(echo -n) <(golint $(go list -e ./...)) + - go tool vet . + - go test -v -race ./... 
+install: + - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/kubernetes/pkg/generated/BUILD.bazel b/vendor/k8s.io/klog/BUILD.bazel similarity index 50% rename from vendor/k8s.io/kubernetes/pkg/generated/BUILD.bazel rename to vendor/k8s.io/klog/BUILD.bazel index f2983e6ff158b..00b2b6c2a561f 100644 --- a/vendor/k8s.io/kubernetes/pkg/generated/BUILD.bazel +++ b/vendor/k8s.io/klog/BUILD.bazel @@ -3,10 +3,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "bindata.go", - "doc.go", + "klog.go", + "klog_file.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/generated", - importpath = "k8s.io/kubernetes/pkg/generated", + importmap = "k8s.io/kops/vendor/k8s.io/klog", + importpath = "k8s.io/klog", visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md new file mode 100644 index 0000000000000..574a56abbbecb --- /dev/null +++ b/vendor/k8s.io/klog/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + +## Contact Information + +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) diff --git a/vendor/k8s.io/klog/LICENSE b/vendor/k8s.io/klog/LICENSE new file mode 100644 index 0000000000000..37ec93a14fdcd --- /dev/null +++ b/vendor/k8s.io/klog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS new file mode 100644 index 0000000000000..380e514f2807b --- /dev/null +++ b/vendor/k8s.io/klog/OWNERS @@ -0,0 +1,19 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - jayunit100 + - hoegaarden + - andyxning + - neolit123 + - pohly + - yagonobre + - vincepri + - detiber +approvers: + - dims + - thockin + - justinsb + - tallclair + - piosz + - brancz + - DirectXMan12 + - lavalamp diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md new file mode 100644 index 0000000000000..bee306f398fbd --- /dev/null +++ b/vendor/k8s.io/klog/README.md @@ -0,0 +1,97 @@ +klog +==== + +klog is a permanent fork of https://github.com/golang/glog. + +## Why was klog created? + +The decision to create klog was one that wasn't made lightly, but it was necessary due to some +drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: + +> The code in this repo [...] is not itself under development + +This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below: + + * `glog` [presents a lot "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented. + * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it + * A long term goal is to implement a logging interface that allows us to add context, change output format, etc. + +Historical context is available here: + + * https://github.com/kubernetes/kubernetes/issues/61006 + * https://github.com/kubernetes/kubernetes/issues/70264 + * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ + * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ + +---- + +How to use klog +=============== +- Replace imports for `github.com/golang/glog` with `k8s.io/klog` +- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags +- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) +- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) +- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md)) + +### Coexisting with glog +This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). 
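The "How to use klog" section above mentions klog.SetOutput() for redirecting everything klog logs to an arbitrary io.Writer. A minimal sketch of that usage, assuming the SetOutput helper the README refers to (a bytes.Buffer stands in for syslog or any other sink):

```go
package main

import (
	"bytes"
	"flag"
	"fmt"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	// Redirection only takes effect when klog is not writing straight to
	// stderr, so turn the (default-on) logtostderr flag off first.
	flag.Set("logtostderr", "false")
	flag.Parse()

	var buf bytes.Buffer
	klog.SetOutput(&buf)

	klog.Info("hello from klog")
	klog.Flush()

	fmt.Print(buf.String())
}
```

Note that logtostderr defaults to true in this fork (see InitFlags in klog.go further down), which is why the sketch switches it off before redirecting.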
+ +You can reach the maintainers of this project at: + +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + +---- + +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + https://github.com/google/glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. + + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/k8s.io/klog/RELEASE.md b/vendor/k8s.io/klog/RELEASE.md new file mode 100644 index 0000000000000..b53eb960ce78f --- /dev/null +++ b/vendor/k8s.io/klog/RELEASE.md @@ -0,0 +1,9 @@ +# Release Process + +The `klog` is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release +1. All [OWNERS](OWNERS) must LGTM this release +1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +1. The release issue is closed +1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS new file mode 100644 index 0000000000000..6128a586995b4 --- /dev/null +++ b/vendor/k8s.io/klog/SECURITY_CONTACTS @@ -0,0 +1,20 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
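The glog examples reproduced in the README above carry over to klog essentially unchanged once InitFlags is wired up. A short sketch of the V-style guarded logging they describe (the verbosity is forced programmatically here purely for illustration; normally it comes from the -v command-line flag):

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	flag.Set("v", "2") // stand-in for passing -v=2 on the command line
	flag.Parse()
	defer klog.Flush()

	klog.Info("Prepare to repel boarders")

	// The V(2) guard avoids evaluating expensive log arguments unless the
	// configured verbosity is at least 2.
	if klog.V(2) {
		klog.Info("Starting transaction...")
	}
	klog.V(2).Infoln("Processed", 42, "elements")
}
```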
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +dims +thockin +justinsb +tallclair +piosz +brancz +DirectXMan12 +lavalamp diff --git a/vendor/k8s.io/klog/code-of-conduct.md b/vendor/k8s.io/klog/code-of-conduct.md new file mode 100644 index 0000000000000..0d15c00cf3252 --- /dev/null +++ b/vendor/k8s.io/klog/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go new file mode 100644 index 0000000000000..887ea62dff75e --- /dev/null +++ b/vendor/k8s.io/klog/klog.go @@ -0,0 +1,1273 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. 
For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package klog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "math" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. 
+func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. 
+// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. + t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +func init() { + // Default stderrThreshold is ERROR. + logging.stderrThreshold = errorLog + + logging.setVState(0, nil, false) + go logging.flushDaemon() +} + +// InitFlags is for explicitly initializing the flags +func InitFlags(flagset *flag.FlagSet) { + if flagset == nil { + flagset = flag.CommandLine + } + flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory") + flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file") + flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800, + "Defines the maximum size a log file can grow to. Unit is megabytes. "+ + "If the value is 0, the maximum file size is unlimited.") + flagset.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files") + flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") + flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when openning log files") + flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") +} + +// Flush flushes all pending log I/O. +func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. 
TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. + mu sync.Mutex + // file holds writer for each of the log types. + file [numSeverity]flushSyncWriter + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 + // traceLocation is the state of the -log_backtrace_at flag. + traceLocation traceLocation + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity Level // V logging level, the value of the -v flag/ + + // If non-empty, overrides the choice of directory in which to write logs. + // See createLogDirs for the full list of possible destinations. + logDir string + + // If non-empty, specifies the path of the file to write logs. mutually exclusive + // with the log-dir option. + logFile string + + // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the + // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile. + logFileMaxSizeMB uint64 + + // If true, do not add the prefix headers, useful when used with SetOutput + skipHeaders bool + + // If true, do not add the headers to log files + skipLogHeaders bool +} + +// buffer holds a byte Buffer for reuse. The zero value is ready for use. +type buffer struct { + bytes.Buffer + tmp [64]byte // temporary byte array for creating headers. + next *buffer +} + +var logging loggingT + +// setVState sets a consistent state for V logging. +// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&logging.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. 
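+// Buffers are recycled through a free list guarded by freeListMu, so the common
+// logging path avoids most allocations; putBuffer returns them to the list.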
+func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. +func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + if l.skipHeaders { + return buf + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. 
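+// It returns the number of digits written.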
+func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) + l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// redirectBuffer is used to set an alternate destination for the logs +type redirectBuffer struct { + w io.Writer +} + +func (rb *redirectBuffer) Sync() error { + return nil +} + +func (rb *redirectBuffer) Flush() error { + return nil +} + +func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { + return rb.w.Write(bytes) +} + +// SetOutput sets the output destination for all severities +func SetOutput(w io.Writer) { + for s := fatalLog; s >= infoLog; s-- { + rb := &redirectBuffer{ + w: w, + } + logging.file[s] = rb + } +} + +// SetOutputBySeverity sets the output destination for specific severity +func SetOutputBySeverity(name string, w io.Writer) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) + } + rb := &redirectBuffer{ + w: w, + } + logging.file[sev] = rb +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. 
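+ // Exit and its variants set fatalNoStacks, so the process terminates with
+ // status 1 and without the goroutine dump that Fatal produces.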
+ if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file + maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. 
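+// When -log_file is set, -log_file_max_size (in megabytes) applies and a value of
+// 0 means no limit; otherwise the package-level MaxSize is used. For example,
+// -log_file_max_size=100 yields a limit of 100 * 1024 * 1024 bytes.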
+func CalculateMaxSize() uint64 { + if logging.logFile != "" { + if logging.logFileMaxSizeMB == 0 { + // If logFileMaxSizeMB is zero, we don't have limitations on the log size. + return math.MaxUint64 + } + // Flag logFileMaxSizeMB is in MB for user convenience. + return logging.logFileMaxSizeMB * 1024 * 1024 + } + // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. + return MaxSize +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= sb.maxbytes { + if err := sb.rotateFile(time.Now(), false); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now, startup) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + if sb.logger.skipLogHeaders { + return nil + } + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + maxbytes: CalculateMaxSize(), + } + if err := sb.rotateFile(now, true); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 5 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. 
Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". + if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity(lb), file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if glog.V(2) { glog.Info("log this") } +// or +// glog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. 
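+ // The global -v level is checked with one atomic load; only when -vmodule is in
+ // effect does the slower, mutex-protected per-call-site lookup below run.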
+ if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). +func InfoDepth(depth int, args ...interface{}) { + logging.printDepth(infoLog, depth, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...interface{}) { + logging.printDepth(warningLog, depth, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). 
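+// For example, a hypothetical helper that wraps klog can attribute the record to
+// its own caller by skipping one extra frame:
+//	func logError(msg string) { klog.ErrorDepth(1, msg) }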
+func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) +} diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go new file mode 100644 index 0000000000000..e4010ad4df06a --- /dev/null +++ b/vendor/k8s.io/klog/klog_file.go @@ -0,0 +1,139 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package klog + +import ( + "errors" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +func createLogDirs() { + if logging.logDir != "" { + logDirs = append(logDirs, logging.logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { + if logging.logFile != "" { + f, err := openOrCreate(logging.logFile, startup) + if err == nil { + return f, logging.logFile, nil + } + return nil, "", fmt.Errorf("log: unable to create log: %v", err) + } + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := openOrCreate(fname, startup) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} + +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. 
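+// Appending on startup means a fixed log file path from a previous run is
+// extended rather than overwritten.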
+func openOrCreate(name string, startup bool) (*os.File, error) { + if startup { + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + return f, err + } + f, err := os.Create(name) + return f, err +} diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/BUILD.bazel b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/BUILD.bazel similarity index 50% rename from vendor/k8s.io/code-generator/cmd/openapi-gen/BUILD.bazel rename to vendor/k8s.io/kube-openapi/cmd/openapi-gen/BUILD.bazel index 7cc93e1489abb..d24793a37e772 100644 --- a/vendor/k8s.io/code-generator/cmd/openapi-gen/BUILD.bazel +++ b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/BUILD.bazel @@ -2,16 +2,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") go_library( name = "go_default_library", - srcs = ["main.go"], - importmap = "k8s.io/kops/vendor/k8s.io/code-generator/cmd/openapi-gen", - importpath = "k8s.io/code-generator/cmd/openapi-gen", + srcs = ["openapi-gen.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kube-openapi/cmd/openapi-gen", + importpath = "k8s.io/kube-openapi/cmd/openapi-gen", visibility = ["//visibility:private"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/code-generator/cmd/openapi-gen/args:go_default_library", - "//vendor/k8s.io/code-generator/pkg/util:go_default_library", - "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kube-openapi/cmd/openapi-gen/args:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/generators:go_default_library", ], ) diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go similarity index 74% rename from vendor/k8s.io/code-generator/cmd/openapi-gen/main.go rename to vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go index fbafc502577d7..3d42da21a844c 100644 --- a/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go +++ b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,28 +17,25 @@ limitations under the License. // This package generates openAPI definition file to be used in open API spec generation on API servers. To generate // definition for a specific type or package add "+k8s:openapi-gen=true" tag to the type/package comment lines. To // exclude a type from a tagged package, add "+k8s:openapi-gen=false" tag to the type comment lines. + package main import ( "flag" - "path/filepath" + "log" - "github.com/golang/glog" - "github.com/spf13/pflag" - "k8s.io/gengo/args" + generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" "k8s.io/kube-openapi/pkg/generators" - generatorargs "k8s.io/code-generator/cmd/openapi-gen/args" - "k8s.io/code-generator/pkg/util" + "github.com/spf13/pflag" + + "k8s.io/klog" ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() - // Override defaults. 
- // TODO: move this out of openapi-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) - genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) flag.Set("logtostderr", "true") @@ -46,16 +43,16 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + log.Fatalf("Arguments validation error: %v", err) } - // Run it. + // Generates the code for the OpenAPIDefinitions. if err := genericArgs.Execute( generators.NameSystems(), generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + log.Fatalf("OpenAPI code generation error: %v", err) } - glog.V(2).Info("Completed successfully.") + log.Println("Code for OpenAPI definitions generated") } diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go index f48700d545440..072c8ec4ae5e2 100644 --- a/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "net/http" - "reflect" "strings" restful "github.com/emicklei/go-restful" @@ -58,7 +57,7 @@ func BuildOpenAPIDefinitionsForResource(model interface{}, config *common.Config o := newOpenAPI(config) // We can discard the return value of toSchema because all we care about is the side effect of calling it. // All the models created for this resource get added to o.swagger.Definitions - _, err := o.toSchema(getCanonicalTypeName(model)) + _, err := o.toSchema(util.GetCanonicalTypeName(model)) if err != nil { return nil, err } @@ -92,6 +91,7 @@ func newOpenAPI(config *common.Config) openAPI { SwaggerProps: spec.SwaggerProps{ Swagger: OpenAPIVersion, Definitions: spec.Definitions{}, + Responses: config.ResponseDefinitions, Paths: &spec.Paths{Paths: map[string]spec.PathItem{}}, Info: config.Info, }, @@ -135,21 +135,6 @@ func (o *openAPI) finalizeSwagger() (*spec.Swagger, error) { return o.swagger, nil } -func getCanonicalTypeName(model interface{}) string { - t := reflect.TypeOf(model) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.PkgPath() == "" { - return t.Name() - } - path := t.PkgPath() - if strings.Contains(path, "/vendor/") { - path = path[strings.Index(path, "/vendor/")+len("/vendor/"):] - } - return path + "." + t.Name() -} - func (o *openAPI) buildDefinitionRecursively(name string) error { uniqueName, extensions := o.config.GetDefinitionName(name) if _, ok := o.swagger.Definitions[uniqueName]; ok { @@ -335,7 +320,7 @@ func (o *openAPI) buildOperations(route restful.Route, inPathCommonParamsMap map } func (o *openAPI) buildResponse(model interface{}, description string) (spec.Response, error) { - schema, err := o.toSchema(getCanonicalTypeName(model)) + schema, err := o.toSchema(util.GetCanonicalTypeName(model)) if err != nil { return spec.Response{}, err } @@ -413,7 +398,7 @@ func (o *openAPI) buildParameter(restParam restful.ParameterData, bodySample int case restful.BodyParameterKind: if bodySample != nil { ret.In = "body" - ret.Schema, err = o.toSchema(getCanonicalTypeName(bodySample)) + ret.Schema, err = o.toSchema(util.GetCanonicalTypeName(bodySample)) return ret, err } else { // There is not enough information in the body parameter to build the definition. 
diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 0d235876deb48..7d5534b24ecdf 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -59,6 +59,12 @@ type Config struct { // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. DefaultResponse *spec.Response + // ResponseDefinitions will be added to "responses" under the top-level swagger object. This is an object + // that holds responses definitions that can be used across operations. This property does not define + // global responses for all operations. For more info please refer: + // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#fixed-fields + ResponseDefinitions map[string]spec.Response + // CommonResponses will be added as a response to all operation specs. This is a good place to add common // responses such as authorization failed. CommonResponses map[int]spec.Response diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/BUILD.bazel b/vendor/k8s.io/kube-openapi/pkg/generators/BUILD.bazel index 0fd5263c2f2c4..4df69277ae9c4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/BUILD.bazel +++ b/vendor/k8s.io/kube-openapi/pkg/generators/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "api_linter.go", + "config.go", "extension.go", "openapi.go", ], @@ -11,12 +12,12 @@ go_library( importpath = "k8s.io/kube-openapi/pkg/generators", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/set-gen/sets:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/cmd/openapi-gen/args:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/generators/rules:go_default_library", diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/README b/vendor/k8s.io/kube-openapi/pkg/generators/README.md similarity index 88% rename from vendor/k8s.io/kube-openapi/pkg/generators/README rename to vendor/k8s.io/kube-openapi/pkg/generators/README.md index feb19b401a93b..72b4e5fb4396c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/README +++ b/vendor/k8s.io/kube-openapi/pkg/generators/README.md @@ -4,8 +4,9 @@ - To exclude a type or a member from a tagged package/type, add "+k8s:openapi-gen=false" tag to the comment lines. # OpenAPI Extensions + OpenAPI spec can have extensions on types. To define one or more extensions on a type or its member -add `+k8s:openapi-gen=x-kubernetes-$NAME:`$VALUE`` to the comment lines before type/member. A type/member can +add `+k8s:openapi-gen=x-kubernetes-$NAME:$VALUE` to the comment lines before type/member. A type/member can have multiple extensions. The rest of the line in the comment will be used as $VALUE so there is no need to escape or quote the value string. Extensions can be used to pass more information to client generators or documentation generators. 
For example a type might have a friendly name to be displayed in documentation or @@ -17,6 +18,7 @@ Custom types which otherwise don't map directly to OpenAPI can override their OpenAPI definition by implementing a function named "OpenAPIDefinition" with the following signature: +```go import openapi "k8s.io/kube-openapi/pkg/common" // ... @@ -35,12 +37,13 @@ the following signature: }, } } +``` Alternatively, the type can avoid the "openapi" import by defining the following methods. The following example produces the same OpenAPI definition as the example above: +```go func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } func (_ Time) OpenAPISchemaFormat() string { return "date-time" } - -TODO(mehdy): Make k8s:openapi-gen a parameter to the generator now that OpenAPI has its own repo. +``` diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go index 9270d26320be3..f732858875d65 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go @@ -17,16 +17,114 @@ limitations under the License. package generators import ( + "bytes" "fmt" "io" + "io/ioutil" + "os" + "sort" "k8s.io/kube-openapi/pkg/generators/rules" - "github.com/golang/glog" + "k8s.io/gengo/generator" "k8s.io/gengo/types" + "k8s.io/klog" ) -// apiLinter is the framework hosting mutliple API rules and recording API rule +const apiViolationFileType = "api-violation" + +type apiViolationFile struct { + // Since our file actually is unrelated to the package structure, use a + // path that hasn't been mangled by the framework. + unmangledPath string +} + +func (a apiViolationFile) AssembleFile(f *generator.File, path string) error { + path = a.unmangledPath + klog.V(2).Infof("Assembling file %q", path) + if path == "-" { + _, err := io.Copy(os.Stdout, &f.Body) + return err + } + + output, err := os.Create(path) + if err != nil { + return err + } + defer output.Close() + _, err = io.Copy(output, &f.Body) + return err +} + +func (a apiViolationFile) VerifyFile(f *generator.File, path string) error { + if path == "-" { + // Nothing to verify against. 
+ return nil + } + path = a.unmangledPath + + formatted := f.Body.Bytes() + existing, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("unable to read file %q for comparison: %v", path, err) + } + if bytes.Compare(formatted, existing) == 0 { + return nil + } + + // Be nice and find the first place where they differ + // (Copied from gengo's default file type) + i := 0 + for i < len(formatted) && i < len(existing) && formatted[i] == existing[i] { + i++ + } + eDiff, fDiff := existing[i:], formatted[i:] + if len(eDiff) > 100 { + eDiff = eDiff[:100] + } + if len(fDiff) > 100 { + fDiff = fDiff[:100] + } + return fmt.Errorf("output for %q differs; first existing/expected diff: \n %q\n %q", path, string(eDiff), string(fDiff)) +} + +func newAPIViolationGen() *apiViolationGen { + return &apiViolationGen{ + linter: newAPILinter(), + } +} + +type apiViolationGen struct { + generator.DefaultGen + + linter *apiLinter +} + +func (v *apiViolationGen) FileType() string { return apiViolationFileType } +func (v *apiViolationGen) Filename() string { + return "this file is ignored by the file assembler" +} + +func (v *apiViolationGen) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + klog.V(5).Infof("validating API rules for type %v", t) + if err := v.linter.validate(t); err != nil { + return err + } + return nil +} + +// Finalize prints the API rule violations to report file (if specified from +// arguments) or stdout (default) +func (v *apiViolationGen) Finalize(c *generator.Context, w io.Writer) error { + // NOTE: we don't return error here because we assume that the report file will + // get evaluated afterwards to determine if error should be raised. For example, + // you can have make rules that compare the report file with existing known + // violations (whitelist) and determine no error if no change is detected. + v.linter.report(w) + return nil +} + +// apiLinter is the framework hosting multiple API rules and recording API rule // violations type apiLinter struct { // API rules that implement APIRule interface and output API rule violations @@ -40,6 +138,7 @@ func newAPILinter() *apiLinter { return &apiLinter{ rules: []APIRule{ &rules.NamesMatch{}, + &rules.OmitEmptyMatchCase{}, }, } } @@ -57,6 +156,25 @@ type apiViolation struct { field string } +// apiViolations implements sort.Interface for []apiViolation based on the fields: rule, +// packageName, typeName and field. 
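+// Sorting the violations makes the report deterministic, so it can be compared
+// against a checked-in list of known violations.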
+type apiViolations []apiViolation + +func (a apiViolations) Len() int { return len(a) } +func (a apiViolations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a apiViolations) Less(i, j int) bool { + if a[i].rule != a[j].rule { + return a[i].rule < a[j].rule + } + if a[i].packageName != a[j].packageName { + return a[i].packageName < a[j].packageName + } + if a[i].typeName != a[j].typeName { + return a[i].typeName < a[j].typeName + } + return a[i].field < a[j].field +} + // APIRule is the interface for validating API rule on Go types type APIRule interface { // Validate evaluates API rule on type t and returns a list of field names in @@ -71,7 +189,7 @@ type APIRule interface { // validate runs all API rules on type t and records any API rule violation func (l *apiLinter) validate(t *types.Type) error { for _, r := range l.rules { - glog.V(5).Infof("validating API rule %v for type %v", r.Name(), t) + klog.V(5).Infof("validating API rule %v for type %v", r.Name(), t) fields, err := r.Validate(t) if err != nil { return err @@ -90,6 +208,7 @@ func (l *apiLinter) validate(t *types.Type) error { // report prints any API rule violation to writer w and returns error if violation exists func (l *apiLinter) report(w io.Writer) error { + sort.Sort(apiViolations(l.violations)) for _, v := range l.violations { fmt.Fprintf(w, "API rule violation: %s,%s,%s,%s\n", v.rule, v.packageName, v.typeName, v.field) } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/config.go b/vendor/k8s.io/kube-openapi/pkg/generators/config.go new file mode 100644 index 0000000000000..33cd9eb5a8a09 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/config.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "path/filepath" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + "k8s.io/klog" + + generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" +) + +type identityNamer struct{} + +func (_ identityNamer) Name(t *types.Type) string { + return t.Name.String() +} + +var _ namer.Namer = identityNamer{} + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer("", nil), + "sorting_namer": identityNamer{}, + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "sorting_namer" +} + +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + header = append(header, []byte( + ` +// This file was autogenerated by openapi-gen. Do not edit it manually! + +`)...) 
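+ // The API-violation report defaults to stdout ("-") unless
+ // CustomArgs.ReportFilename points it at a file; see apiViolationFile.AssembleFile.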
+ + reportPath := "-" + if customArgs, ok := arguments.CustomArgs.(*generatorargs.CustomArgs); ok { + reportPath = customArgs.ReportFilename + } + context.FileTypes[apiViolationFileType] = apiViolationFile{ + unmangledPath: reportPath, + } + + return generator.Packages{ + &generator.DefaultPackage{ + PackageName: filepath.Base(arguments.OutputPackagePath), + PackagePath: arguments.OutputPackagePath, + HeaderText: header, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + newOpenAPIGen( + arguments.OutputFileBaseName, + arguments.OutputPackagePath, + ), + newAPIViolationGen(), + } + }, + FilterFunc: apiTypeFilterFunc, + }, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go index befe38db24860..14eab18f6b917 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go @@ -36,20 +36,20 @@ type extensionAttributes struct { // Extension tag to openapi extension attributes var tagToExtension = map[string]extensionAttributes{ - "patchMergeKey": extensionAttributes{ + "patchMergeKey": { xName: "x-kubernetes-patch-merge-key", kind: types.Slice, }, - "patchStrategy": extensionAttributes{ + "patchStrategy": { xName: "x-kubernetes-patch-strategy", kind: types.Slice, allowedValues: sets.NewString("merge", "retainKeys"), }, - "listMapKey": extensionAttributes{ + "listMapKey": { xName: "x-kubernetes-list-map-keys", kind: types.Slice, }, - "listType": extensionAttributes{ + "listType": { xName: "x-kubernetes-list-type", kind: types.Slice, allowedValues: sets.NewString("atomic", "set", "map"), diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index d6c6275a78f6e..11d42b6d335b3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -20,20 +20,17 @@ import ( "bytes" "fmt" "io" - "os" "path/filepath" "reflect" "sort" "strings" - "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" - generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" openapi "k8s.io/kube-openapi/pkg/common" - "github.com/golang/glog" + "k8s.io/klog" ) // This is the comment tag that carries parameters for open API generation. @@ -88,69 +85,19 @@ func hasOptionalTag(m *types.Member) bool { return hasOptionalCommentTag || hasOptionalJsonTag } -type identityNamer struct{} - -func (_ identityNamer) Name(t *types.Type) string { - return t.Name.String() -} - -var _ namer.Namer = identityNamer{} - -// NameSystems returns the name system used by the generators in this package. -func NameSystems() namer.NameSystems { - return namer.NameSystems{ - "raw": namer.NewRawNamer("", nil), - "sorting_namer": identityNamer{}, - } -} - -// DefaultNameSystem returns the default name system for ordering the types to be -// processed by the generators in this package. 
-func DefaultNameSystem() string { - return "sorting_namer" -} - -func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() - if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) +func apiTypeFilterFunc(c *generator.Context, t *types.Type) bool { + // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen + if strings.HasPrefix(t.Name.Name, "codecSelfer") { + return false } - header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) - header = append(header, []byte( - ` -// This file was autogenerated by openapi-gen. Do not edit it manually! - -`)...) - - reportFilename := "-" - if customArgs, ok := arguments.CustomArgs.(*generatorargs.CustomArgs); ok { - reportFilename = customArgs.ReportFilename + pkg := c.Universe.Package(t.Name.Package) + if hasOpenAPITagValue(pkg.Comments, tagValueTrue) { + return !hasOpenAPITagValue(t.CommentLines, tagValueFalse) } - - return generator.Packages{ - &generator.DefaultPackage{ - PackageName: filepath.Base(arguments.OutputPackagePath), - PackagePath: arguments.OutputPackagePath, - HeaderText: header, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{NewOpenAPIGen(arguments.OutputFileBaseName, arguments.OutputPackagePath, context, newAPILinter(), reportFilename)} - }, - FilterFunc: func(c *generator.Context, t *types.Type) bool { - // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen - if strings.HasPrefix(t.Name.Name, "codecSelfer") { - return false - } - pkg := context.Universe.Package(t.Name.Package) - if hasOpenAPITagValue(pkg.Comments, tagValueTrue) { - return !hasOpenAPITagValue(t.CommentLines, tagValueFalse) - } - if hasOpenAPITagValue(t.CommentLines, tagValueTrue) { - return true - } - return false - }, - }, + if hasOpenAPITagValue(t.CommentLines, tagValueTrue) { + return true } + return false } const ( @@ -162,24 +109,17 @@ const ( type openAPIGen struct { generator.DefaultGen // TargetPackage is the package that will get GetOpenAPIDefinitions function returns all open API definitions. 
- targetPackage string - imports namer.ImportTracker - types []*types.Type - context *generator.Context - linter *apiLinter - reportFilename string + targetPackage string + imports namer.ImportTracker } -func NewOpenAPIGen(sanitizedName string, targetPackage string, context *generator.Context, linter *apiLinter, reportFilename string) generator.Generator { +func newOpenAPIGen(sanitizedName string, targetPackage string) generator.Generator { return &openAPIGen{ DefaultGen: generator.DefaultGen{ OptionalName: sanitizedName, }, - imports: generator.NewImportTracker(), - targetPackage: targetPackage, - context: context, - linter: linter, - reportFilename: reportFilename, + imports: generator.NewImportTracker(), + targetPackage: targetPackage, } } @@ -198,15 +138,6 @@ func (g *openAPIGen) Namers(c *generator.Context) namer.NameSystems { } } -func (g *openAPIGen) Filter(c *generator.Context, t *types.Type) bool { - // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen - if strings.HasPrefix(t.Name.Name, "codecSelfer") { - return false - } - g.types = append(g.types, t) - return true -} - func (g *openAPIGen) isOtherPackage(pkg string) bool { if pkg == g.targetPackage { return false @@ -239,7 +170,7 @@ func (g *openAPIGen) Init(c *generator.Context, w io.Writer) error { sw.Do("func GetOpenAPIDefinitions(ref $.ReferenceCallback|raw$) map[string]$.OpenAPIDefinition|raw$ {\n", argsFromType(nil)) sw.Do("return map[string]$.OpenAPIDefinition|raw${\n", argsFromType(nil)) - for _, t := range g.types { + for _, t := range c.Order { err := newOpenAPITypeWriter(sw).generateCall(t) if err != nil { return err @@ -253,11 +184,7 @@ func (g *openAPIGen) Init(c *generator.Context, w io.Writer) error { } func (g *openAPIGen) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { - glog.V(5).Infof("validating API rules for type %v", t) - if err := g.linter.validate(t); err != nil { - return err - } - glog.V(5).Infof("generating for type %v", t) + klog.V(5).Infof("generating for type %v", t) sw := generator.NewSnippetWriter(w, c, "$", "$") err := newOpenAPITypeWriter(sw).generate(t) if err != nil { @@ -362,7 +289,7 @@ func (g openAPITypeWriter) generateMembers(t *types.Type, required []string) ([] required = append(required, name) } if err = g.generateProperty(&m, t); err != nil { - glog.Errorf("Error when generating: %v, %v\n", name, m) + klog.Errorf("Error when generating: %v, %v\n", name, m) return required, err } } @@ -449,7 +376,7 @@ func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error { // Initially, we will only log struct extension errors. if len(errors) > 0 { for _, e := range errors { - glog.V(2).Infof("[%s]: %s\n", t.String(), e) + klog.V(2).Infof("[%s]: %s\n", t.String(), e) } } // TODO(seans3): Validate struct extensions here. 
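Note on the filter change in the hunks above: the per-generator Filter method was removed and type selection now happens once, in apiTypeFilterFunc, driven by the +k8s.io:openapi-gen comment tag on the package and on the individual type. The following is a minimal standalone sketch of that opt-in/opt-out precedence only; the tagValue type and the include helper are hypothetical stand-ins for gengo's comment-tag lookup and are not part of the vendored code.

// tag_precedence_sketch.go
package main

import "fmt"

// tagValue models the three states a package or type comment can be in:
// no openapi-gen tag, tag set to "true", or tag set to "false".
type tagValue int

const (
	tagUnset tagValue = iota
	tagTrue
	tagFalse
)

// include reports whether a type would get an OpenAPI definition under the
// precedence shown in apiTypeFilterFunc:
//   - a package-level "true" opts every type in, unless the type itself
//     carries an explicit "false";
//   - otherwise a type is included only if it carries an explicit "true".
func include(pkgTag, typeTag tagValue) bool {
	if pkgTag == tagTrue {
		return typeTag != tagFalse
	}
	return typeTag == tagTrue
}

func main() {
	fmt.Println(include(tagTrue, tagUnset))  // true: package-level opt-in
	fmt.Println(include(tagTrue, tagFalse))  // false: type-level opt-out wins
	fmt.Println(include(tagUnset, tagTrue))  // true: explicit type opt-in
	fmt.Println(include(tagUnset, tagUnset)) // false: nothing requested
}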
@@ -465,7 +392,7 @@ func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *typ if len(errors) > 0 { errorPrefix := fmt.Sprintf("[%s] %s:", parent.String(), m.String()) for _, e := range errors { - glog.V(2).Infof("%s %s\n", errorPrefix, e) + klog.V(2).Infof("%s %s\n", errorPrefix, e) } } g.emitExtensions(extensions) @@ -678,27 +605,3 @@ func (g openAPITypeWriter) generateSliceProperty(t *types.Type) error { g.Do("},\n},\n},\n", nil) return nil } - -// Finalize prints the API rule violations to report file (if specified from arguments) or stdout (default) -func (g *openAPIGen) Finalize(c *generator.Context, w io.Writer) error { - // If report file isn't specified, return error to force user to choose either stdout ("-") or a file name - if len(g.reportFilename) == 0 { - return fmt.Errorf("empty report file name: please provide a valid file name or use the default \"-\" (stdout)") - } - // If stdout is specified, print violations and return error - if g.reportFilename == "-" { - return g.linter.report(os.Stdout) - } - // Otherwise, print violations to report file and return nil - f, err := os.Create(g.reportFilename) - if err != nil { - return err - } - defer f.Close() - g.linter.report(f) - // NOTE: we don't return error here because we assume that the report file will - // get evaluated afterwards to determine if error should be raised. For example, - // you can have make rules that compare the report file with existing known - // violations (whitelist) and determine no error if no change is detected. - return nil -} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/BUILD.bazel b/vendor/k8s.io/kube-openapi/pkg/generators/rules/BUILD.bazel index 6130f10b4631e..50688c195b42e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/rules/BUILD.bazel +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "doc.go", "names_match.go", + "omitempty_match_case.go", ], importmap = "k8s.io/kops/vendor/k8s.io/kube-openapi/pkg/generators/rules", importpath = "k8s.io/kube-openapi/pkg/generators/rules", diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS new file mode 100755 index 0000000000000..235bc545b88b6 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS @@ -0,0 +1,4 @@ +reviewers: +- roycaihw +approvers: +- roycaihw diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go new file mode 100644 index 0000000000000..dd37ad8a578ae --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go @@ -0,0 +1,64 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "reflect" + "strings" + + "k8s.io/gengo/types" +) + +// OmitEmptyMatchCase implements APIRule interface. +// "omitempty" must appear verbatim (no case variants). 
+type OmitEmptyMatchCase struct{} + +func (n *OmitEmptyMatchCase) Name() string { + return "omitempty_match_case" +} + +func (n *OmitEmptyMatchCase) Validate(t *types.Type) ([]string, error) { + fields := make([]string, 0) + + // Only validate struct type and ignore the rest + switch t.Kind { + case types.Struct: + for _, m := range t.Members { + goName := m.Name + jsonTag, ok := reflect.StructTag(m.Tags).Lookup("json") + if !ok { + continue + } + + parts := strings.Split(jsonTag, ",") + if len(parts) < 2 { + // no tags other than name + continue + } + if parts[0] == "-" { + // not serialized + continue + } + for _, part := range parts[1:] { + if strings.EqualFold(part, "omitempty") && part != "omitempty" { + fields = append(fields, goName) + } + } + } + } + return fields, nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index a57dcd363f60d..890a39399f668 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -196,20 +196,24 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error } fields := map[string]Schema{} + fieldOrder := []string{} for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { var err error - path := path.FieldPath(namedSchema.GetName()) - fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) + name := namedSchema.GetName() + path := path.FieldPath(name) + fields[name], err = d.ParseSchema(namedSchema.GetValue(), &path) if err != nil { return nil, err } + fieldOrder = append(fieldOrder, name) } return &Kind{ BaseSchema: d.parseBaseSchema(s, path), RequiredFields: s.GetRequired(), Fields: fields, + FieldOrder: fieldOrder, }, nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go index f26b5ef881041..46643aa508146 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -173,6 +173,8 @@ type Kind struct { RequiredFields []string // Maps field names to types. Fields map[string]Schema + // FieldOrder reports the canonical order for the fields. + FieldOrder []string } var _ Schema = &Kind{} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util.go b/vendor/k8s.io/kube-openapi/pkg/util/util.go index bcc0c4d4bb57f..c5c42cd44cd44 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/util.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/util.go @@ -16,7 +16,10 @@ limitations under the License. package util -import "strings" +import ( + "reflect" + "strings" +) // ToCanonicalName converts Golang package/type name into canonical OpenAPI name. // Examples: @@ -37,3 +40,20 @@ func ToCanonicalName(name string) string { } return strings.Join(nameParts, ".") } + +// GetCanonicalTypeName will find the canonical type name of a sample object, removing +// the "vendor" part of the path +func GetCanonicalTypeName(model interface{}) string { + t := reflect.TypeOf(model) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.PkgPath() == "" { + return t.Name() + } + path := t.PkgPath() + if strings.Contains(path, "/vendor/") { + path = path[strings.Index(path, "/vendor/")+len("/vendor/"):] + } + return path + "." 
+ t.Name() +} diff --git a/vendor/k8s.io/kubernetes/Godeps/LICENSES b/vendor/k8s.io/kubernetes/Godeps/LICENSES index a7832673562a1..ce4c2e74d5aea 100644 --- a/vendor/k8s.io/kubernetes/Godeps/LICENSES +++ b/vendor/k8s.io/kubernetes/Godeps/LICENSES @@ -8137,7 +8137,7 @@ SOFTWARE. ================================================================================ -= vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute licensed under: = += vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute licensed under: = Apache License @@ -8767,7 +8767,7 @@ SOFTWARE. ================================================================================ -= vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage licensed under: = += vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage licensed under: = Apache License @@ -10648,6 +10648,205 @@ THE SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/Azure/go-autorest/logger licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b +================================================================================ + + ================================================================================ = vendor/github.com/Azure/go-autorest/version licensed under: = @@ -13367,6 +13566,216 @@ THE SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/bazelbuild/bazel-gazelle/internal/testtools licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/bazelbuild/bazel-gazelle/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/bazelbuild/bazel-gazelle/internal/version licensed under: = @@ -16512,10 +16921,9 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/github.com/cockroachdb/cmux licensed under: = - += vendor/github.com/codedellemc/goscaleio licensed under: = - Apache License +Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -16695,7 +17103,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -16703,7 +17111,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16717,12 +17125,13 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. See the License for the specific language governing permissions and limitations under the License. -= vendor/github.com/cockroachdb/cmux/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 + += vendor/github.com/codedellemc/goscaleio/LICENSE d2794c0df5b907fdace235a619d80314 ================================================================================ ================================================================================ -= vendor/github.com/codedellemc/goscaleio licensed under: = += vendor/github.com/codedellemc/goscaleio/types/v1 licensed under: = Apache License Version 2.0, January 2004 @@ -16932,9 +17341,9 @@ Apache License ================================================================================ -= vendor/github.com/codedellemc/goscaleio/types/v1 licensed under: = += vendor/github.com/container-storage-interface/spec/lib/go/csi licensed under: = -Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -17136,13 +17545,12 @@ Apache License See the License for the specific language governing permissions and limitations under the License. 
- -= vendor/github.com/codedellemc/goscaleio/LICENSE d2794c0df5b907fdace235a619d80314 += vendor/github.com/container-storage-interface/spec/LICENSE e3fc50a88d0a364313df4b21ef20c29e ================================================================================ ================================================================================ -= vendor/github.com/container-storage-interface/spec/lib/go/csi/v0 licensed under: = += vendor/github.com/containerd/console licensed under: = Apache License Version 2.0, January 2004 @@ -17324,7 +17732,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -17332,7 +17740,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17346,16 +17754,17 @@ Apache License See the License for the specific language governing permissions and limitations under the License. -= vendor/github.com/container-storage-interface/spec/LICENSE e3fc50a88d0a364313df4b21ef20c29e += vendor/github.com/containerd/console/LICENSE 86d3f3a95c324c9479bd8986968f4327 ================================================================================ ================================================================================ -= vendor/github.com/containerd/console licensed under: = += vendor/github.com/containerd/containerd/api/services/containers/v1 licensed under: = + Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -17530,24 +17939,13 @@ Apache License END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] + Copyright 2013-2016 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -17555,12 +17953,12 @@ Apache License See the License for the specific language governing permissions and limitations under the License. 
-= vendor/github.com/containerd/console/LICENSE 86d3f3a95c324c9479bd8986968f4327 += vendor/github.com/containerd/containerd/LICENSE.code aadc30f9c14d876ded7bedc0afd2d3d7 ================================================================================ ================================================================================ -= vendor/github.com/containerd/containerd/api/services/containers/v1 licensed under: = += vendor/github.com/containerd/containerd/api/services/tasks/v1 licensed under: = Apache License @@ -17759,7 +18157,7 @@ Apache License ================================================================================ -= vendor/github.com/containerd/containerd/api/services/tasks/v1 licensed under: = += vendor/github.com/containerd/containerd/api/services/version/v1 licensed under: = Apache License @@ -17958,7 +18356,7 @@ Apache License ================================================================================ -= vendor/github.com/containerd/containerd/api/services/version/v1 licensed under: = += vendor/github.com/containerd/containerd/api/types licensed under: = Apache License @@ -18157,7 +18555,7 @@ Apache License ================================================================================ -= vendor/github.com/containerd/containerd/api/types licensed under: = += vendor/github.com/containerd/containerd/api/types/task licensed under: = Apache License @@ -18356,7 +18754,7 @@ Apache License ================================================================================ -= vendor/github.com/containerd/containerd/api/types/task licensed under: = += vendor/github.com/containerd/containerd/containers licensed under: = Apache License @@ -18555,7 +18953,7 @@ Apache License ================================================================================ -= vendor/github.com/containerd/containerd/containers licensed under: = += vendor/github.com/containerd/containerd/dialer licensed under: = Apache License @@ -18754,7 +19152,7 @@ Apache License ================================================================================ -= vendor/github.com/containerd/containerd/dialer licensed under: = += vendor/github.com/containerd/containerd/errdefs licensed under: = Apache License @@ -18953,7 +19351,7 @@ Apache License ================================================================================ -= vendor/github.com/containerd/containerd/errdefs licensed under: = += vendor/github.com/containerd/containerd/namespaces licensed under: = Apache License @@ -19152,12 +19550,11 @@ Apache License ================================================================================ -= vendor/github.com/containerd/containerd/namespaces licensed under: = - += vendor/github.com/containernetworking/cni/libcni licensed under: = Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -19332,13 +19729,24 @@ Apache License END OF TERMS AND CONDITIONS - Copyright 2013-2016 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) 
The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -19346,12 +19754,13 @@ Apache License See the License for the specific language governing permissions and limitations under the License. -= vendor/github.com/containerd/containerd/LICENSE.code aadc30f9c14d876ded7bedc0afd2d3d7 + += vendor/github.com/containernetworking/cni/LICENSE fa818a259cbed7ce8bc2a22d35a464fc ================================================================================ ================================================================================ -= vendor/github.com/containernetworking/cni/libcni licensed under: = += vendor/github.com/containernetworking/cni/pkg/invoke licensed under: = Apache License Version 2.0, January 2004 @@ -19561,7 +19970,7 @@ Apache License ================================================================================ -= vendor/github.com/containernetworking/cni/pkg/invoke licensed under: = += vendor/github.com/containernetworking/cni/pkg/types licensed under: = Apache License Version 2.0, January 2004 @@ -19771,7 +20180,7 @@ Apache License ================================================================================ -= vendor/github.com/containernetworking/cni/pkg/types licensed under: = += vendor/github.com/containernetworking/cni/pkg/types/020 licensed under: = Apache License Version 2.0, January 2004 @@ -19981,7 +20390,7 @@ Apache License ================================================================================ -= vendor/github.com/containernetworking/cni/pkg/types/020 licensed under: = += vendor/github.com/containernetworking/cni/pkg/types/current licensed under: = Apache License Version 2.0, January 2004 @@ -20191,7 +20600,7 @@ Apache License ================================================================================ -= vendor/github.com/containernetworking/cni/pkg/types/current licensed under: = += vendor/github.com/containernetworking/cni/pkg/version licensed under: = Apache License Version 2.0, January 2004 @@ -20401,7 +20810,36 @@ Apache License ================================================================================ -= vendor/github.com/containernetworking/cni/pkg/version licensed under: = += vendor/github.com/coreos/bbolt licensed under: = + +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission 
notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + += vendor/github.com/coreos/bbolt/LICENSE 13b2a308eefa10d841e3bf2467dbe07a +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/alarm licensed under: = + Apache License Version 2.0, January 2004 @@ -20583,7 +21021,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -20591,7 +21029,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20605,41 +21043,12 @@ Apache License See the License for the specific language governing permissions and limitations under the License. - -= vendor/github.com/containernetworking/cni/LICENSE fa818a259cbed7ce8bc2a22d35a464fc -================================================================================ - - -================================================================================ -= vendor/github.com/coreos/bbolt licensed under: = - -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -= vendor/github.com/coreos/bbolt/LICENSE 13b2a308eefa10d841e3bf2467dbe07a += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ -= vendor/github.com/coreos/etcd/alarm licensed under: = += vendor/github.com/coreos/etcd/auth licensed under: = Apache License @@ -20849,7 +21258,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/auth licensed under: = += vendor/github.com/coreos/etcd/auth/authpb licensed under: = Apache License @@ -21059,7 +21468,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/auth/authpb licensed under: = += vendor/github.com/coreos/etcd/client licensed under: = Apache License @@ -21269,7 +21678,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/client licensed under: = += vendor/github.com/coreos/etcd/clientv3 licensed under: = Apache License @@ -21479,7 +21888,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/clientv3 licensed under: = += vendor/github.com/coreos/etcd/clientv3/concurrency licensed under: = Apache License @@ -21689,7 +22098,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/clientv3/concurrency licensed under: = += vendor/github.com/coreos/etcd/clientv3/namespace licensed under: = Apache License @@ -21899,7 +22308,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/clientv3/namespace licensed under: = += vendor/github.com/coreos/etcd/clientv3/naming licensed under: = Apache License @@ -22109,7 +22518,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/clientv3/naming licensed under: = += vendor/github.com/coreos/etcd/compactor licensed under: = Apache License @@ -22319,7 +22728,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/compactor licensed under: = += vendor/github.com/coreos/etcd/discovery licensed under: = Apache License @@ -22529,7 +22938,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================================================ -= vendor/github.com/coreos/etcd/discovery licensed under: = += vendor/github.com/coreos/etcd/embed licensed under: = Apache License @@ -22739,7 +23148,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/embed licensed under: = += vendor/github.com/coreos/etcd/error licensed under: = Apache License @@ -22949,7 +23358,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/error licensed under: = += vendor/github.com/coreos/etcd/etcdserver licensed under: = Apache License @@ -23159,7 +23568,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/etcdserver licensed under: = += vendor/github.com/coreos/etcd/etcdserver/api licensed under: = Apache License @@ -23369,7 +23778,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api licensed under: = += vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp licensed under: = Apache License @@ -23579,7 +23988,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp licensed under: = += vendor/github.com/coreos/etcd/etcdserver/api/v2http licensed under: = Apache License @@ -23789,7 +24198,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/v2http licensed under: = += vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes licensed under: = Apache License @@ -23999,7 +24408,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes licensed under: = += vendor/github.com/coreos/etcd/etcdserver/api/v2v3 licensed under: = Apache License @@ -30929,7 +31338,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/monotime licensed under: = += vendor/github.com/coreos/etcd/pkg/netutil licensed under: = Apache License @@ -31139,7 +31548,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/netutil licensed under: = += vendor/github.com/coreos/etcd/pkg/pathutil licensed under: = Apache License @@ -31349,7 +31758,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================================================ -= vendor/github.com/coreos/etcd/pkg/pathutil licensed under: = += vendor/github.com/coreos/etcd/pkg/pbutil licensed under: = Apache License @@ -31559,7 +31968,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/pbutil licensed under: = += vendor/github.com/coreos/etcd/pkg/runtime licensed under: = Apache License @@ -31769,7 +32178,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/runtime licensed under: = += vendor/github.com/coreos/etcd/pkg/schedule licensed under: = Apache License @@ -31979,7 +32388,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/schedule licensed under: = += vendor/github.com/coreos/etcd/pkg/srv licensed under: = Apache License @@ -32189,7 +32598,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/srv licensed under: = += vendor/github.com/coreos/etcd/pkg/testutil licensed under: = Apache License @@ -32399,7 +32808,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/testutil licensed under: = += vendor/github.com/coreos/etcd/pkg/tlsutil licensed under: = Apache License @@ -32609,7 +33018,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/tlsutil licensed under: = += vendor/github.com/coreos/etcd/pkg/transport licensed under: = Apache License @@ -32819,7 +33228,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/transport licensed under: = += vendor/github.com/coreos/etcd/pkg/types licensed under: = Apache License @@ -33029,7 +33438,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/types licensed under: = += vendor/github.com/coreos/etcd/pkg/wait licensed under: = Apache License @@ -33239,7 +33648,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/pkg/wait licensed under: = += vendor/github.com/coreos/etcd/proxy/grpcproxy licensed under: = Apache License @@ -33449,7 +33858,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================================================ -= vendor/github.com/coreos/etcd/proxy/grpcproxy licensed under: = += vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter licensed under: = Apache License @@ -33659,7 +34068,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter licensed under: = += vendor/github.com/coreos/etcd/proxy/grpcproxy/cache licensed under: = Apache License @@ -33869,7 +34278,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/proxy/grpcproxy/cache licensed under: = += vendor/github.com/coreos/etcd/raft licensed under: = Apache License @@ -34079,7 +34488,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/raft licensed under: = += vendor/github.com/coreos/etcd/raft/raftpb licensed under: = Apache License @@ -34289,7 +34698,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/raft/raftpb licensed under: = += vendor/github.com/coreos/etcd/rafthttp licensed under: = Apache License @@ -34499,7 +34908,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/rafthttp licensed under: = += vendor/github.com/coreos/etcd/snap licensed under: = Apache License @@ -34709,7 +35118,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/snap licensed under: = += vendor/github.com/coreos/etcd/snap/snappb licensed under: = Apache License @@ -34919,7 +35328,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/snap/snappb licensed under: = += vendor/github.com/coreos/etcd/store licensed under: = Apache License @@ -35129,7 +35538,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/store licensed under: = += vendor/github.com/coreos/etcd/version licensed under: = Apache License @@ -35339,7 +35748,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/version licensed under: = += vendor/github.com/coreos/etcd/wal licensed under: = Apache License @@ -35549,7 +35958,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================================================ -= vendor/github.com/coreos/etcd/wal licensed under: = += vendor/github.com/coreos/etcd/wal/walpb licensed under: = Apache License @@ -35759,10 +36168,9 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -= vendor/github.com/coreos/etcd/wal/walpb licensed under: = - += vendor/github.com/coreos/go-oidc licensed under: = - Apache License +Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -35942,7 +36350,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -35950,7 +36358,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -35964,225 +36372,16 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. See the License for the specific language governing permissions and limitations under the License. -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 + += vendor/github.com/coreos/go-oidc/LICENSE d2794c0df5b907fdace235a619d80314 ================================================================================ ================================================================================ -= vendor/github.com/coreos/go-oidc licensed under: = += vendor/github.com/coreos/go-semver/semver licensed under: = -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -= vendor/github.com/coreos/go-oidc/LICENSE d2794c0df5b907fdace235a619d80314 -================================================================================ - - -================================================================================ -= vendor/github.com/coreos/go-semver/semver licensed under: = - - - Apache License + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -46006,6 +46205,72 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/github.com/globalsign/mgo/bson licensed under: = + +mgo - MongoDB driver for Go + +Copyright (c) 2010-2013 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. 
Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/github.com/globalsign/mgo/LICENSE 566e96676859b5704130b80941bc9f1f +================================================================================ + + +================================================================================ += vendor/github.com/globalsign/mgo/internal/json licensed under: = + +mgo - MongoDB driver for Go + +Copyright (c) 2010-2013 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/github.com/globalsign/mgo/LICENSE 566e96676859b5704130b80941bc9f1f +================================================================================ + + ================================================================================ = vendor/github.com/go-ini/ini licensed under: = @@ -46415,6 +46680,216 @@ third-party archives. ================================================================================ +================================================================================ += vendor/github.com/go-openapi/analysis/internal licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/go-openapi/analysis/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/go-openapi/errors licensed under: = @@ -48305,6 +48780,56 @@ third-party archives. ================================================================================ +================================================================================ += vendor/github.com/go-ozzo/ozzo-validation licensed under: = + +The MIT License (MIT) +Copyright (c) 2016, Qiang Xue + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software +and associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + += vendor/github.com/go-ozzo/ozzo-validation/LICENSE da12d993f2ce14947ad6eec35520b081 +================================================================================ + + +================================================================================ += vendor/github.com/go-ozzo/ozzo-validation/is licensed under: = + +The MIT License (MIT) +Copyright (c) 2016, Qiang Xue + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software +and associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ += vendor/github.com/go-ozzo/ozzo-validation/LICENSE da12d993f2ce14947ad6eec35520b081 +================================================================================ + + ================================================================================ = vendor/github.com/go-sql-driver/mysql licensed under: = @@ -49863,205 +50388,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -================================================================================ -= vendor/github.com/golang/glog licensed under: = - -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. 
For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. 
You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/golang/glog/LICENSE 19cbd64715b51267a47bf3750cc6a8a5 -================================================================================ - - ================================================================================ = vendor/github.com/golang/groupcache/lru licensed under: = @@ -66944,6 +67270,214 @@ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR I ================================================================================ +================================================================================ += vendor/github.com/grpc-ecosystem/go-grpc-middleware licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. += vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE 7ab5c73bb7e4679b16dd7c11b3559acf +================================================================================ + + ================================================================================ = vendor/github.com/grpc-ecosystem/go-grpc-prometheus licensed under: = @@ -71793,6 +72327,39 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/github.com/karrick/godirwalk licensed under: = + +BSD 2-Clause License + +Copyright (c) 2017, Karrick McDermott +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/github.com/karrick/godirwalk/LICENSE 7bea66fc0a31c6329f9392034bee75d2 +================================================================================ + + ================================================================================ = vendor/github.com/kisielk/sqlstruct licensed under: = @@ -73547,9 +74114,202 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLI ================================================================================ -= vendor/github.com/lpabon/godbc licensed under: = += vendor/github.com/magiconair/properties licensed under: = -Apache License +goproperties - properties file decoder for Go + +Copyright (c) 2013-2014 - Frank Schroeder + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ += vendor/github.com/magiconair/properties/LICENSE c383a608fb9a0d227953e928803b9631 +================================================================================ + + +================================================================================ += vendor/github.com/mailru/easyjson/buffer licensed under: = + +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + += vendor/github.com/mailru/easyjson/LICENSE 819e81c2ec13e1bbc47dc5e90bb4d88b +================================================================================ + + +================================================================================ += vendor/github.com/mailru/easyjson/jlexer licensed under: = + +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ += vendor/github.com/mailru/easyjson/LICENSE 819e81c2ec13e1bbc47dc5e90bb4d88b +================================================================================ + + +================================================================================ += vendor/github.com/mailru/easyjson/jwriter licensed under: = + +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + += vendor/github.com/mailru/easyjson/LICENSE 819e81c2ec13e1bbc47dc5e90bb4d88b +================================================================================ + + +================================================================================ += vendor/github.com/MakeNowJust/heredoc licensed under: = + +The MIT License (MIT) + +Copyright (c) 2014-2017 TSUYUSATO Kitsune + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ += vendor/github.com/MakeNowJust/heredoc/LICENSE 59c4411f6d7dfdaa85623e672d3d4438 +================================================================================ + + +================================================================================ += vendor/github.com/marstr/guid licensed under: = + +MIT License + +Copyright (c) 2016 Martin Strobel + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. += vendor/github.com/marstr/guid/LICENSE.txt 356484d12e6ad8a7c2d360b236e9a9c8 +================================================================================ + + +================================================================================ += vendor/github.com/mattn/go-shellwords licensed under: = + +The MIT License (MIT) + +Copyright (c) 2017 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ += vendor/github.com/mattn/go-shellwords/LICENSE e5116fc268f5118168ff06a271b50ef9 +================================================================================ + + +================================================================================ += vendor/github.com/mattn/go-sqlite3 licensed under: = + +The MIT License (MIT) + +Copyright (c) 2014 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + += vendor/github.com/mattn/go-sqlite3/LICENSE 2b7590a6661bc1940f50329c495898c6 +================================================================================ + + +================================================================================ += vendor/github.com/matttproud/golang_protobuf_extensions/pbutil licensed under: = + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -73750,207 +74510,15 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -= vendor/github.com/lpabon/godbc/LICENSE 6c4db32a2fa8717faffa1d4f10136f47 -================================================================================ - - -================================================================================ -= vendor/github.com/magiconair/properties licensed under: = - -goproperties - properties file decoder for Go - -Copyright (c) 2013-2014 - Frank Schroeder - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -= vendor/github.com/magiconair/properties/LICENSE c383a608fb9a0d227953e928803b9631 -================================================================================ - - -================================================================================ -= vendor/github.com/mailru/easyjson/buffer licensed under: = - -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -= vendor/github.com/mailru/easyjson/LICENSE 819e81c2ec13e1bbc47dc5e90bb4d88b -================================================================================ - - -================================================================================ -= vendor/github.com/mailru/easyjson/jlexer licensed under: = - -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -= vendor/github.com/mailru/easyjson/LICENSE 819e81c2ec13e1bbc47dc5e90bb4d88b -================================================================================ - - -================================================================================ -= vendor/github.com/mailru/easyjson/jwriter licensed under: = - -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -= vendor/github.com/mailru/easyjson/LICENSE 819e81c2ec13e1bbc47dc5e90bb4d88b -================================================================================ - - -================================================================================ -= vendor/github.com/MakeNowJust/heredoc licensed under: = - -The MIT License (MIT) - -Copyright (c) 2014-2017 TSUYUSATO Kitsune - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- -= vendor/github.com/MakeNowJust/heredoc/LICENSE 59c4411f6d7dfdaa85623e672d3d4438 -================================================================================ - - -================================================================================ -= vendor/github.com/marstr/guid licensed under: = - -MIT License - -Copyright (c) 2016 Martin Strobel - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -= vendor/github.com/marstr/guid/LICENSE.txt 356484d12e6ad8a7c2d360b236e9a9c8 -================================================================================ - - -================================================================================ -= vendor/github.com/mattn/go-shellwords licensed under: = - -The MIT License (MIT) - -Copyright (c) 2017 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- -= vendor/github.com/mattn/go-shellwords/LICENSE e5116fc268f5118168ff06a271b50ef9 -================================================================================ - - -================================================================================ -= vendor/github.com/mattn/go-sqlite3 licensed under: = - -The MIT License (MIT) - -Copyright (c) 2014 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -= vendor/github.com/mattn/go-sqlite3/LICENSE 2b7590a6661bc1940f50329c495898c6 += vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE e3fc50a88d0a364313df4b21ef20c29e ================================================================================ ================================================================================ -= vendor/github.com/matttproud/golang_protobuf_extensions/pbutil licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib licensed under: = - Apache License +Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -74151,13 +74719,12 @@ SOFTWARE. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- -= vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE e3fc50a88d0a364313df4b21ef20c29e += vendor/github.com/mesos/mesos-go/LICENSE 6c4db32a2fa8717faffa1d4f10136f47 ================================================================================ ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/agent licensed under: = Apache License Version 2.0, January 2004 @@ -74365,7 +74932,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/agent licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls licensed under: = Apache License Version 2.0, January 2004 @@ -74573,7 +75140,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/client licensed under: = Apache License Version 2.0, January 2004 @@ -74781,7 +75348,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/client licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/debug licensed under: = Apache License Version 2.0, January 2004 @@ -74989,7 +75556,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/debug licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/encoding licensed under: = Apache License Version 2.0, January 2004 @@ -75197,7 +75764,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/encoding licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/codecs licensed under: = Apache License Version 2.0, January 2004 @@ -75405,7 +75972,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/codecs licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/framing licensed under: = Apache License Version 2.0, January 2004 @@ -75613,7 +76180,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/framing licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/json licensed under: = Apache License Version 2.0, January 2004 @@ -75821,7 +76388,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/json licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/proto licensed under: = Apache License Version 2.0, January 2004 @@ -76029,7 +76596,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/proto licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli licensed under: = Apache License Version 2.0, January 2004 @@ -76237,7 +76804,7 @@ Apache 
License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/apierrors licensed under: = Apache License Version 2.0, January 2004 @@ -76445,7 +77012,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/apierrors licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/recordio licensed under: = Apache License Version 2.0, January 2004 @@ -76653,7 +77220,7 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/recordio licensed under: = += vendor/github.com/mesos/mesos-go/api/v1/lib/roles licensed under: = Apache License Version 2.0, January 2004 @@ -76861,9 +77428,9 @@ Apache License ================================================================================ -= vendor/github.com/mesos/mesos-go/api/v1/lib/roles licensed under: = += vendor/github.com/mholt/caddy/caddyfile licensed under: = -Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -77064,12 +77631,112 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -= vendor/github.com/mesos/mesos-go/LICENSE 6c4db32a2fa8717faffa1d4f10136f47 + += vendor/github.com/mholt/caddy/LICENSE.txt e3fc50a88d0a364313df4b21ef20c29e ================================================================================ ================================================================================ -= vendor/github.com/mholt/caddy/caddyfile licensed under: = += vendor/github.com/Microsoft/go-winio licensed under: = + +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + += vendor/github.com/Microsoft/go-winio/LICENSE 69205ff73858f2c22b2ca135b557e8ef +================================================================================ + + +================================================================================ += vendor/github.com/Microsoft/hcsshim licensed under: = + +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. += vendor/github.com/Microsoft/hcsshim/LICENSE d4c2cbbea5ee1e7c86dff68a7073718e +================================================================================ + + +================================================================================ += vendor/github.com/miekg/dns licensed under: = + +Extensions of the original work are copyright (c) 2011 Miek Gieben + +As this is fork of the official Go code the same license applies: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + += vendor/github.com/miekg/dns/LICENSE 147353de6868a20caa562d26eab7b3c5 +================================================================================ + + +================================================================================ += vendor/github.com/mindprince/gonvml licensed under: = + Apache License Version 2.0, January 2004 @@ -77251,7 +77918,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -77259,7 +77926,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -77273,113 +77940,14 @@ Apache License See the License for the specific language governing permissions and limitations under the License. -= vendor/github.com/mholt/caddy/LICENSE.txt e3fc50a88d0a364313df4b21ef20c29e -================================================================================ - - -================================================================================ -= vendor/github.com/Microsoft/go-winio licensed under: = - -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - -= vendor/github.com/Microsoft/go-winio/LICENSE 69205ff73858f2c22b2ca135b557e8ef -================================================================================ - - -================================================================================ -= vendor/github.com/Microsoft/hcsshim licensed under: = - -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -= vendor/github.com/Microsoft/hcsshim/LICENSE d4c2cbbea5ee1e7c86dff68a7073718e -================================================================================ - - -================================================================================ -= vendor/github.com/miekg/dns licensed under: = - -Extensions of the original work are copyright (c) 2011 Miek Gieben - -As this is fork of the official Go code the same license applies: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -= vendor/github.com/miekg/dns/LICENSE 147353de6868a20caa562d26eab7b3c5 += vendor/github.com/mindprince/gonvml/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ -= vendor/github.com/mindprince/gonvml licensed under: = - += vendor/github.com/mistifyio/go-zfs licensed under: = - Apache License +Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -77559,7 +78127,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -77567,7 +78135,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright (c) 2014, OmniTI Computer Consulting, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -77580,15 +78148,72 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. += vendor/github.com/mistifyio/go-zfs/LICENSE cce9462224bfb44c1866ef7bd5eddf54 +================================================================================ + -= vendor/github.com/mindprince/gonvml/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ += vendor/github.com/mitchellh/go-wordwrap licensed under: = + +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ += vendor/github.com/mitchellh/go-wordwrap/LICENSE.md 56da355a12d4821cda57b8f23ec34bc4 ================================================================================ -= vendor/github.com/mistifyio/go-zfs licensed under: = -Apache License + +================================================================================ += vendor/github.com/mitchellh/mapstructure licensed under: = + +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/github.com/mitchellh/mapstructure/LICENSE 3f7765c3d4f58e1f84c4313cecf0f5bd +================================================================================ + + +================================================================================ += vendor/github.com/modern-go/concurrent licensed under: = + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -77768,7 +78393,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -77776,7 +78401,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright (c) 2014, OmniTI Computer Consulting, Inc. + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -77789,70 +78414,13 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-= vendor/github.com/mistifyio/go-zfs/LICENSE cce9462224bfb44c1866ef7bd5eddf54 -================================================================================ - - -================================================================================ -= vendor/github.com/mitchellh/go-wordwrap licensed under: = - -The MIT License (MIT) - -Copyright (c) 2014 Mitchell Hashimoto -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -= vendor/github.com/mitchellh/go-wordwrap/LICENSE.md 56da355a12d4821cda57b8f23ec34bc4 -================================================================================ - - -================================================================================ -= vendor/github.com/mitchellh/mapstructure licensed under: = - -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -= vendor/github.com/mitchellh/mapstructure/LICENSE 3f7765c3d4f58e1f84c4313cecf0f5bd += vendor/github.com/modern-go/concurrent/LICENSE 86d3f3a95c324c9479bd8986968f4327 ================================================================================ ================================================================================ -= vendor/github.com/modern-go/concurrent licensed under: = += vendor/github.com/modern-go/reflect2 licensed under: = Apache License Version 2.0, January 2004 @@ -78056,251 +78624,42 @@ THE SOFTWARE. See the License for the specific language governing permissions and limitations under the License. 
-= vendor/github.com/modern-go/concurrent/LICENSE 86d3f3a95c324c9479bd8986968f4327 += vendor/github.com/modern-go/reflect2/LICENSE 86d3f3a95c324c9479bd8986968f4327 ================================================================================ ================================================================================ -= vendor/github.com/modern-go/reflect2 licensed under: = - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/modern-go/reflect2/LICENSE 86d3f3a95c324c9479bd8986968f4327 -================================================================================ - - -================================================================================ -= vendor/github.com/mohae/deepcopy licensed under: = - -The MIT License (MIT) - -Copyright (c) 2014 Joel - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -= vendor/github.com/mohae/deepcopy/LICENSE 268dc9c546e3de67a93c1d12a039d702 -================================================================================ - - -================================================================================ -= vendor/github.com/mrunalp/fileutils licensed under: = - += vendor/github.com/mohae/deepcopy licensed under: = + +The MIT License (MIT) + +Copyright (c) 2014 Joel + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + += vendor/github.com/mohae/deepcopy/LICENSE 268dc9c546e3de67a93c1d12a039d702 +================================================================================ + + +================================================================================ += vendor/github.com/mrunalp/fileutils licensed under: = + Apache License Version 2.0, January 2004 @@ -87394,6 +87753,41 @@ THE SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/sigma/go-inotify licensed under: = + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/github.com/sigma/go-inotify/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707 +================================================================================ + + ================================================================================ = vendor/github.com/sirupsen/logrus licensed under: = @@ -87423,6 +87817,216 @@ THE SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/soheilhy/cmux licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/soheilhy/cmux/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/spf13/afero licensed under: = @@ -88819,38 +89423,17 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/github.com/tools/godep licensed under: = += vendor/github.com/tmc/grpc-websocket-proxy/wsproxy licensed under: = -Copyright © 2013 Keith Rarick. -Portions Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (C) 2016 Travis Cline -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -= vendor/github.com/tools/godep/License 71eb66e9b353dd06ca5a81ce0f469e1a += vendor/github.com/tmc/grpc-websocket-proxy/LICENSE 0894f9b225c28f0896b4bab675a2e19a ================================================================================ @@ -96899,6 +97482,222 @@ SOFTWARE. 
================================================================================ +================================================================================ += vendor/go.uber.org/atomic licensed under: = + +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/go.uber.org/atomic/LICENSE.txt 1caee86519456feda989f8a838102b50 +================================================================================ + + +================================================================================ += vendor/go.uber.org/multierr licensed under: = + +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/go.uber.org/multierr/LICENSE.txt f65b21a547112d1bc7b11b90f9b31997 +================================================================================ + + +================================================================================ += vendor/go.uber.org/zap licensed under: = + +Copyright (c) 2016-2017 Uber Technologies, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/go.uber.org/zap/LICENSE.txt 5e8153e456a82529ea845e0d511abb69 +================================================================================ + + +================================================================================ += vendor/go.uber.org/zap/buffer licensed under: = + +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/go.uber.org/zap/LICENSE.txt 5e8153e456a82529ea845e0d511abb69 +================================================================================ + + +================================================================================ += vendor/go.uber.org/zap/internal/bufferpool licensed under: = + +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/go.uber.org/zap/LICENSE.txt 5e8153e456a82529ea845e0d511abb69 +================================================================================ + + +================================================================================ += vendor/go.uber.org/zap/internal/color licensed under: = + +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/go.uber.org/zap/LICENSE.txt 5e8153e456a82529ea845e0d511abb69 +================================================================================ + + +================================================================================ += vendor/go.uber.org/zap/internal/exit licensed under: = + +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ += vendor/go.uber.org/zap/LICENSE.txt 5e8153e456a82529ea845e0d511abb69 +================================================================================ + + +================================================================================ += vendor/go.uber.org/zap/zapcore licensed under: = + +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/go.uber.org/zap/LICENSE.txt 5e8153e456a82529ea845e0d511abb69 +================================================================================ + + ================================================================================ = vendor/golang.org/x/crypto/bcrypt licensed under: = @@ -97495,9 +98294,44 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/golang.org/x/exp/inotify licensed under: = += vendor/golang.org/x/lint licensed under: = -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ += vendor/golang.org/x/lint/LICENSE 4c728948788b1a02f33ae4e906546eef +================================================================================ + + +================================================================================ += vendor/golang.org/x/lint/golint licensed under: = + +Copyright (c) 2013 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -97525,7 +98359,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -= vendor/golang.org/x/exp/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707 += vendor/golang.org/x/lint/LICENSE 4c728948788b1a02f33ae4e906546eef ================================================================================ @@ -98965,7 +99799,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/golang.org/x/tools/go/vcs licensed under: = += vendor/golang.org/x/tools/go/gcexportdata licensed under: = Copyright (c) 2009 The Go Authors. All rights reserved. @@ -99000,7 +99834,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/golang.org/x/tools/imports licensed under: = += vendor/golang.org/x/tools/go/gcimporter15 licensed under: = Copyright (c) 2009 The Go Authors. All rights reserved. @@ -99035,187 +99869,77 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/blas licensed under: = += vendor/golang.org/x/tools/go/vcs licensed under: = -Copyright ©2013 The Gonum Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the gonum project nor the names of its authors and - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-= vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a -================================================================================ - - -================================================================================ -= vendor/gonum.org/v1/gonum/blas/blas64 licensed under: = - -Copyright ©2013 The Gonum Authors. All rights reserved. +modification, are permitted provided that the following conditions are +met: -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the gonum project nor the names of its authors and - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -= vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a -================================================================================ - - -================================================================================ -= vendor/gonum.org/v1/gonum/blas/gonum licensed under: = -Copyright ©2013 The Gonum Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the gonum project nor the names of its authors and - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -= vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a += vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707 ================================================================================ ================================================================================ -= vendor/gonum.org/v1/gonum/floats licensed under: = += vendor/golang.org/x/tools/imports licensed under: = -Copyright ©2013 The Gonum Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the gonum project nor the names of its authors and - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-= vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a -================================================================================ - - -================================================================================ -= vendor/gonum.org/v1/gonum/graph licensed under: = - -Copyright ©2013 The Gonum Authors. All rights reserved. +modification, are permitted provided that the following conditions are +met: -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the gonum project nor the names of its authors and - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -= vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a -================================================================================ - - -================================================================================ -= vendor/gonum.org/v1/gonum/graph/encoding licensed under: = - -Copyright ©2013 The Gonum Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the gonum project nor the names of its authors and - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -= vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a += vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707 ================================================================================ ================================================================================ -= vendor/gonum.org/v1/gonum/graph/encoding/dot licensed under: = += vendor/gonum.org/v1/gonum/blas licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99245,7 +99969,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/graph/formats/dot licensed under: = += vendor/gonum.org/v1/gonum/blas/blas64 licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99275,7 +99999,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/graph/formats/dot/ast licensed under: = += vendor/gonum.org/v1/gonum/blas/gonum licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99305,7 +100029,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/graph/formats/dot/internal/astx licensed under: = += vendor/gonum.org/v1/gonum/floats licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99335,7 +100059,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/graph/formats/dot/internal/errors licensed under: = += vendor/gonum.org/v1/gonum/graph licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99365,7 +100089,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================================================ -= vendor/gonum.org/v1/gonum/graph/formats/dot/internal/lexer licensed under: = += vendor/gonum.org/v1/gonum/graph/encoding licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99395,7 +100119,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/graph/formats/dot/internal/parser licensed under: = += vendor/gonum.org/v1/gonum/graph/encoding/dot licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99425,7 +100149,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/graph/formats/dot/internal/token licensed under: = += vendor/gonum.org/v1/gonum/graph/formats/dot licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99455,7 +100179,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/graph/internal/ordered licensed under: = += vendor/gonum.org/v1/gonum/graph/formats/dot/ast licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99485,7 +100209,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/graph/internal/set licensed under: = += vendor/gonum.org/v1/gonum/graph/formats/dot/internal/astx licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99515,7 +100239,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/graph/internal/uid licensed under: = += vendor/gonum.org/v1/gonum/graph/formats/dot/internal/errors licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99545,7 +100269,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/graph/simple licensed under: = += vendor/gonum.org/v1/gonum/graph/formats/dot/internal/lexer licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99575,7 +100299,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/internal/asm/c128 licensed under: = += vendor/gonum.org/v1/gonum/graph/formats/dot/internal/parser licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99605,7 +100329,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/internal/asm/f32 licensed under: = += vendor/gonum.org/v1/gonum/graph/formats/dot/internal/token licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99635,7 +100359,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================================================ -= vendor/gonum.org/v1/gonum/internal/asm/f64 licensed under: = += vendor/gonum.org/v1/gonum/graph/internal/ordered licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99665,7 +100389,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/internal/math32 licensed under: = += vendor/gonum.org/v1/gonum/graph/internal/set licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99695,7 +100419,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/lapack licensed under: = += vendor/gonum.org/v1/gonum/graph/internal/uid licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99725,7 +100449,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/lapack/gonum licensed under: = += vendor/gonum.org/v1/gonum/graph/simple licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99755,7 +100479,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/lapack/lapack64 licensed under: = += vendor/gonum.org/v1/gonum/internal/asm/c128 licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99785,7 +100509,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/gonum.org/v1/gonum/mat licensed under: = += vendor/gonum.org/v1/gonum/internal/asm/f32 licensed under: = Copyright ©2013 The Gonum Authors. All rights reserved. @@ -99815,112 +100539,187 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/api/compute/v0.alpha licensed under: = += vendor/gonum.org/v1/gonum/internal/asm/f64 licensed under: = -Copyright (c) 2011 Google Inc. All rights reserved. +Copyright ©2013 The Gonum Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of the gonum project nor the names of its authors and + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -= vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 += vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a ================================================================================ ================================================================================ -= vendor/google.golang.org/api/compute/v0.beta licensed under: = += vendor/gonum.org/v1/gonum/internal/math32 licensed under: = -Copyright (c) 2011 Google Inc. All rights reserved. +Copyright ©2013 The Gonum Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the gonum project nor the names of its authors and + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. 
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. += vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a +================================================================================ + -= vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 ================================================================================ += vendor/gonum.org/v1/gonum/lapack licensed under: = +Copyright ©2013 The Gonum Authors. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the gonum project nor the names of its authors and + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+= vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a ================================================================================ -= vendor/google.golang.org/api/compute/v1 licensed under: = -Copyright (c) 2011 Google Inc. All rights reserved. + +================================================================================ += vendor/gonum.org/v1/gonum/lapack/gonum licensed under: = + +Copyright ©2013 The Gonum Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the gonum project nor the names of its authors and + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. += vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a +================================================================================ -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + +================================================================================ += vendor/gonum.org/v1/gonum/lapack/lapack64 licensed under: = + +Copyright ©2013 The Gonum Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the gonum project nor the names of its authors and + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. += vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a +================================================================================ -= vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 + +================================================================================ += vendor/gonum.org/v1/gonum/mat licensed under: = + +Copyright ©2013 The Gonum Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the gonum project nor the names of its authors and + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. += vendor/gonum.org/v1/gonum/LICENSE 665e67d07d85e236cceb8de602c6255a ================================================================================ ================================================================================ -= vendor/google.golang.org/api/container/v1 licensed under: = += vendor/google.golang.org/api/compute/v0.alpha licensed under: = Copyright (c) 2011 Google Inc. All rights reserved. @@ -99955,7 +100754,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/api/gensupport licensed under: = += vendor/google.golang.org/api/compute/v0.beta licensed under: = Copyright (c) 2011 Google Inc. All rights reserved. @@ -99990,7 +100789,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/api/googleapi licensed under: = += vendor/google.golang.org/api/compute/v1 licensed under: = Copyright (c) 2011 Google Inc. All rights reserved. @@ -100025,7 +100824,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/api/googleapi/internal/uritemplates licensed under: = += vendor/google.golang.org/api/container/v1 licensed under: = Copyright (c) 2011 Google Inc. All rights reserved. @@ -100060,7 +100859,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/api/logging/v2beta1 licensed under: = += vendor/google.golang.org/api/gensupport licensed under: = Copyright (c) 2011 Google Inc. All rights reserved. @@ -100095,7 +100894,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/api/monitoring/v3 licensed under: = += vendor/google.golang.org/api/googleapi licensed under: = Copyright (c) 2011 Google Inc. All rights reserved. @@ -100130,7 +100929,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/api/pubsub/v1 licensed under: = += vendor/google.golang.org/api/googleapi/internal/uritemplates licensed under: = Copyright (c) 2011 Google Inc. All rights reserved. @@ -100165,7 +100964,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/api/tpu/v1 licensed under: = += vendor/google.golang.org/api/logging/v2beta1 licensed under: = Copyright (c) 2011 Google Inc. All rights reserved. @@ -100200,212 +100999,107 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================================================ -= vendor/google.golang.org/genproto/googleapis/api/annotations licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. += vendor/google.golang.org/api/monitoring/v3 licensed under: = - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +Copyright (c) 2011 Google Inc. All rights reserved. - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. += vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 +================================================================================ - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +================================================================================ += vendor/google.golang.org/api/pubsub/v1 licensed under: = - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +Copyright (c) 2011 Google Inc. All rights reserved. - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. 
You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - END OF TERMS AND CONDITIONS += vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 +================================================================================ - APPENDIX: How to apply the Apache License to your work. - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) 
The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +================================================================================ += vendor/google.golang.org/api/tpu/v1 licensed under: = - Copyright [yyyy] [name of copyright owner] +Copyright (c) 2011 Google Inc. All rights reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - http://www.apache.org/licenses/LICENSE-2.0 + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -= vendor/google.golang.org/genproto/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 ================================================================================ @@ -101039,6 +101733,426 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/google.golang.org/grpc/balancer/base licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/balancer/roundrobin licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/google.golang.org/grpc/codes licensed under: = @@ -101670,7 +102784,427 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages licensed under: = += vendor/google.golang.org/grpc/encoding licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/encoding/proto licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages licensed under: = Apache License @@ -102090,7 +103624,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/grpc/health/grpc_health_v1 licensed under: = += vendor/google.golang.org/grpc/health licensed under: = Apache License @@ -102300,7 +103834,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/grpc/internal licensed under: = += vendor/google.golang.org/grpc/health/grpc_health_v1 licensed under: = Apache License @@ -102510,7 +104044,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/grpc/keepalive licensed under: = += vendor/google.golang.org/grpc/internal licensed under: = Apache License @@ -102720,7 +104254,847 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/grpc/metadata licensed under: = += vendor/google.golang.org/grpc/internal/backoff licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/internal/channelz licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/internal/grpcrand licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/keepalive licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/metadata licensed under: = Apache License @@ -103140,7 +105514,1057 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/grpc/peer licensed under: = += vendor/google.golang.org/grpc/peer licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/resolver licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. 
+ + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/resolver/dns licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/resolver/passthrough licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/stats licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/status licensed under: = Apache License @@ -103350,7 +106774,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/grpc/resolver licensed under: = += vendor/google.golang.org/grpc/tap licensed under: = Apache License @@ -103560,7 +106984,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/grpc/stats licensed under: = += vendor/google.golang.org/grpc/transport licensed under: = Apache License @@ -103770,217 +107194,215 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -= vendor/google.golang.org/grpc/status licensed under: = += vendor/gopkg.in/gcfg.v1 licensed under: = +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. += vendor/gopkg.in/gcfg.v1/LICENSE 13cea479df204c85485b5db6eb1bc9d5 +================================================================================ - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +================================================================================ += vendor/gopkg.in/gcfg.v1/scanner licensed under: = - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
+= vendor/gopkg.in/gcfg.v1/LICENSE 13cea479df204c85485b5db6eb1bc9d5 +================================================================================ - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +================================================================================ += vendor/gopkg.in/gcfg.v1/token licensed under: = - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and += vendor/gopkg.in/gcfg.v1/LICENSE 13cea479df204c85485b5db6eb1bc9d5 +================================================================================ - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +================================================================================ += vendor/gopkg.in/gcfg.v1/types licensed under: = - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. 
+= vendor/gopkg.in/gcfg.v1/LICENSE 13cea479df204c85485b5db6eb1bc9d5 +================================================================================ - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS +================================================================================ += vendor/gopkg.in/inf.v0 licensed under: = - APPENDIX: How to apply the Apache License to your work. +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - Copyright [yyyy] [name of copyright owner] + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- http://www.apache.org/licenses/LICENSE-2.0 += vendor/gopkg.in/inf.v0/LICENSE 13cea479df204c85485b5db6eb1bc9d5 +================================================================================ - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ += vendor/gopkg.in/natefinch/lumberjack.v2 licensed under: = +The MIT License (MIT) + +Copyright (c) 2014 Nate Finch +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. += vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE 574cdb55b81249478f5af5f789e9e29f ================================================================================ -= vendor/google.golang.org/grpc/tap licensed under: = + + +================================================================================ += vendor/gopkg.in/square/go-jose.v2 licensed under: = Apache License @@ -104185,12 +107607,12 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. See the License for the specific language governing permissions and limitations under the License. -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/gopkg.in/square/go-jose.v2/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ -= vendor/google.golang.org/grpc/transport licensed under: = += vendor/gopkg.in/square/go-jose.v2/cipher licensed under: = Apache License @@ -104395,220 +107817,12 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. See the License for the specific language governing permissions and limitations under the License. -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/gopkg.in/gcfg.v1 licensed under: = - -Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go -Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -= vendor/gopkg.in/gcfg.v1/LICENSE 13cea479df204c85485b5db6eb1bc9d5 -================================================================================ - - -================================================================================ -= vendor/gopkg.in/gcfg.v1/scanner licensed under: = - -Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go -Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -= vendor/gopkg.in/gcfg.v1/LICENSE 13cea479df204c85485b5db6eb1bc9d5 -================================================================================ - - -================================================================================ -= vendor/gopkg.in/gcfg.v1/token licensed under: = - -Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go -Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -= vendor/gopkg.in/gcfg.v1/LICENSE 13cea479df204c85485b5db6eb1bc9d5 -================================================================================ - - -================================================================================ -= vendor/gopkg.in/gcfg.v1/types licensed under: = - -Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go -Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -= vendor/gopkg.in/gcfg.v1/LICENSE 13cea479df204c85485b5db6eb1bc9d5 -================================================================================ - - -================================================================================ -= vendor/gopkg.in/inf.v0 licensed under: = - -Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go -Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -= vendor/gopkg.in/inf.v0/LICENSE 13cea479df204c85485b5db6eb1bc9d5 -================================================================================ - - -================================================================================ -= vendor/gopkg.in/natefinch/lumberjack.v2 licensed under: = - -The MIT License (MIT) - -Copyright (c) 2014 Nate Finch - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -= vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE 574cdb55b81249478f5af5f789e9e29f += vendor/gopkg.in/square/go-jose.v2/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ -= vendor/gopkg.in/square/go-jose.v2 licensed under: = += vendor/gopkg.in/square/go-jose.v2/json licensed under: = Apache License @@ -104818,7 +108032,7 @@ SOFTWARE. ================================================================================ -= vendor/gopkg.in/square/go-jose.v2/cipher licensed under: = += vendor/gopkg.in/square/go-jose.v2/jwt licensed under: = Apache License @@ -105028,8 +108242,39 @@ SOFTWARE. ================================================================================ -= vendor/gopkg.in/square/go-jose.v2/json licensed under: = += vendor/gopkg.in/warnings.v0 licensed under: = + +Copyright (c) 2016 Péter Surányi. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/gopkg.in/warnings.v0/LICENSE c6775875c9d604beb22447dfae3d7049 +================================================================================ + + +================================================================================ += vendor/gopkg.in/yaml.v2 licensed under: = Apache License Version 2.0, January 2004 @@ -105211,7 +108456,7 @@ SOFTWARE. APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -105219,7 +108464,7 @@ SOFTWARE. same "printed page" as the copyright notice for easier identification within third-party archives. 
- Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -105233,12 +108478,12 @@ SOFTWARE. See the License for the specific language governing permissions and limitations under the License. -= vendor/gopkg.in/square/go-jose.v2/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/gopkg.in/yaml.v2/LICENSE e3fc50a88d0a364313df4b21ef20c29e ================================================================================ ================================================================================ -= vendor/gopkg.in/square/go-jose.v2/jwt licensed under: = += vendor/k8s.io/gengo/args licensed under: = Apache License @@ -105429,7 +108674,7 @@ SOFTWARE. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -105443,65 +108688,12 @@ SOFTWARE. See the License for the specific language governing permissions and limitations under the License. -= vendor/gopkg.in/square/go-jose.v2/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/gopkg.in/warnings.v0 licensed under: = - -Copyright (c) 2016 Péter Surányi. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -= vendor/gopkg.in/warnings.v0/LICENSE c6775875c9d604beb22447dfae3d7049 -================================================================================ - - -================================================================================ -= vendor/gopkg.in/yaml.v2 licensed under: = - -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -= vendor/gopkg.in/yaml.v2/LICENSE 6964839e54f4fefcdae13f22b92d0fbb += vendor/k8s.io/gengo/LICENSE ad09685d909e7a9f763d2bb62d4bd6fb ================================================================================ ================================================================================ -= vendor/k8s.io/gengo/args licensed under: = += vendor/k8s.io/gengo/examples/deepcopy-gen/generators licensed under: = Apache License @@ -105711,7 +108903,7 @@ limitations under the License. ================================================================================ -= vendor/k8s.io/gengo/examples/deepcopy-gen/generators licensed under: = += vendor/k8s.io/gengo/examples/defaulter-gen/generators licensed under: = Apache License @@ -105921,7 +109113,7 @@ limitations under the License. ================================================================================ -= vendor/k8s.io/gengo/examples/defaulter-gen/generators licensed under: = += vendor/k8s.io/gengo/examples/import-boss/generators licensed under: = Apache License @@ -106131,7 +109323,7 @@ limitations under the License. ================================================================================ -= vendor/k8s.io/gengo/examples/import-boss/generators licensed under: = += vendor/k8s.io/gengo/examples/set-gen/generators licensed under: = Apache License @@ -106341,7 +109533,7 @@ limitations under the License. ================================================================================ -= vendor/k8s.io/gengo/examples/set-gen/generators licensed under: = += vendor/k8s.io/gengo/examples/set-gen/sets licensed under: = Apache License @@ -106551,7 +109743,7 @@ limitations under the License. ================================================================================ -= vendor/k8s.io/gengo/examples/set-gen/sets licensed under: = += vendor/k8s.io/gengo/generator licensed under: = Apache License @@ -106761,7 +109953,7 @@ limitations under the License. ================================================================================ -= vendor/k8s.io/gengo/generator licensed under: = += vendor/k8s.io/gengo/namer licensed under: = Apache License @@ -106971,7 +110163,7 @@ limitations under the License. ================================================================================ -= vendor/k8s.io/gengo/namer licensed under: = += vendor/k8s.io/gengo/parser licensed under: = Apache License @@ -107181,7 +110373,7 @@ limitations under the License. ================================================================================ -= vendor/k8s.io/gengo/parser licensed under: = += vendor/k8s.io/gengo/types licensed under: = Apache License @@ -107391,10 +110583,9 @@ limitations under the License. ================================================================================ -= vendor/k8s.io/gengo/types licensed under: = - += vendor/k8s.io/heapster/metrics/api/v1/types licensed under: = - Apache License +Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -107574,7 +110765,7 @@ limitations under the License. APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -107582,7 +110773,7 @@ limitations under the License. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2014 The Kubernetes Authors. + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -107596,14 +110787,214 @@ limitations under the License. See the License for the specific language governing permissions and limitations under the License. -= vendor/k8s.io/gengo/LICENSE ad09685d909e7a9f763d2bb62d4bd6fb += vendor/k8s.io/heapster/LICENSE 136e4f49dbf29942c572a3a8f6e88a77 ================================================================================ ================================================================================ -= vendor/k8s.io/heapster/metrics/api/v1/types licensed under: = += vendor/k8s.io/klog licensed under: = Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/k8s.io/klog/LICENSE 19cbd64715b51267a47bf3750cc6a8a5 +================================================================================ + + +================================================================================ += vendor/k8s.io/kube-openapi/cmd/openapi-gen licensed under: = + + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -107783,7 +111174,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -107791,7 +111182,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. 
- Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -107805,12 +111196,12 @@ Apache License See the License for the specific language governing permissions and limitations under the License. -= vendor/k8s.io/heapster/LICENSE 136e4f49dbf29942c572a3a8f6e88a77 += vendor/k8s.io/kube-openapi/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ -= vendor/k8s.io/kube-openapi/cmd/openapi-gen licensed under: = += vendor/k8s.io/kube-openapi/cmd/openapi-gen/args licensed under: = Apache License @@ -108020,7 +111411,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/cmd/openapi-gen/args licensed under: = += vendor/k8s.io/kube-openapi/pkg/aggregator licensed under: = Apache License @@ -108230,7 +111621,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/pkg/aggregator licensed under: = += vendor/k8s.io/kube-openapi/pkg/builder licensed under: = Apache License @@ -108440,7 +111831,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/pkg/builder licensed under: = += vendor/k8s.io/kube-openapi/pkg/common licensed under: = Apache License @@ -108650,7 +112041,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/pkg/common licensed under: = += vendor/k8s.io/kube-openapi/pkg/generators licensed under: = Apache License @@ -108860,7 +112251,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/pkg/generators licensed under: = += vendor/k8s.io/kube-openapi/pkg/generators/rules licensed under: = Apache License @@ -109070,7 +112461,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/pkg/generators/rules licensed under: = += vendor/k8s.io/kube-openapi/pkg/handler licensed under: = Apache License @@ -109280,7 +112671,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/pkg/handler licensed under: = += vendor/k8s.io/kube-openapi/pkg/util licensed under: = Apache License @@ -109490,7 +112881,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/pkg/util licensed under: = += vendor/k8s.io/kube-openapi/pkg/util/proto licensed under: = Apache License @@ -109700,7 +113091,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/pkg/util/proto licensed under: = += vendor/k8s.io/kube-openapi/pkg/util/proto/testing licensed under: = Apache License @@ -109910,7 +113301,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/pkg/util/proto/testing licensed under: = += vendor/k8s.io/kube-openapi/pkg/util/proto/validation licensed under: = Apache License @@ -110120,7 +113511,7 @@ Apache License 
================================================================================ -= vendor/k8s.io/kube-openapi/pkg/util/proto/validation licensed under: = += vendor/k8s.io/kube-openapi/pkg/util/sets licensed under: = Apache License @@ -110330,7 +113721,7 @@ Apache License ================================================================================ -= vendor/k8s.io/kube-openapi/pkg/util/sets licensed under: = += vendor/k8s.io/utils/clock licensed under: = Apache License @@ -110535,12 +113926,12 @@ Apache License See the License for the specific language governing permissions and limitations under the License. -= vendor/k8s.io/kube-openapi/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/k8s.io/utils/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ -= vendor/k8s.io/utils/clock licensed under: = += vendor/k8s.io/utils/exec licensed under: = Apache License @@ -110750,7 +114141,7 @@ Apache License ================================================================================ -= vendor/k8s.io/utils/exec licensed under: = += vendor/k8s.io/utils/exec/testing licensed under: = Apache License @@ -110960,7 +114351,7 @@ Apache License ================================================================================ -= vendor/k8s.io/utils/exec/testing licensed under: = += vendor/k8s.io/utils/pointer licensed under: = Apache License @@ -111170,212 +114561,60 @@ Apache License ================================================================================ -= vendor/k8s.io/utils/pointer licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and += vendor/sigs.k8s.io/yaml licensed under: = - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +The MIT License (MIT) - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +Copyright (c) 2014 Sam Ghods - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - END OF TERMS AND CONDITIONS +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - APPENDIX: How to apply the Apache License to your work. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - Copyright [yyyy] [name of copyright owner] +Copyright (c) 2012 The Go Authors. All rights reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - http://www.apache.org/licenses/LICENSE-2.0 + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-= vendor/k8s.io/utils/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/sigs.k8s.io/yaml/LICENSE 0ceb9ff3b27d3a8cf451ca3785d73c71 ================================================================================ diff --git a/vendor/k8s.io/kubernetes/pkg/api/events/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/api/events/BUILD.bazel deleted file mode 100644 index 90913e8822203..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/events/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["sorted_event_list.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/api/events", - importpath = "k8s.io/kubernetes/pkg/api/events", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/events/OWNERS b/vendor/k8s.io/kubernetes/pkg/api/events/OWNERS deleted file mode 100755 index cbc4e8d9d7269..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/events/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -reviewers: -- gmarek diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD.bazel deleted file mode 100644 index fd1aa3563cebe..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/api/pod", - importpath = "k8s.io/kubernetes/pkg/api/pod", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/features:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/OWNERS b/vendor/k8s.io/kubernetes/pkg/api/pod/OWNERS deleted file mode 100755 index bead0f8a8ecb0..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/pod/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: -- smarterclayton -- thockin -- david-mcmahon diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go b/vendor/k8s.io/kubernetes/pkg/api/pod/util.go deleted file mode 100644 index b9f875e57a77b..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go +++ /dev/null @@ -1,328 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pod - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/features" -) - -// Visitor is called with each object name, and returns true if visiting should continue -type Visitor func(name string) (shouldContinue bool) - -// VisitPodSecretNames invokes the visitor function with the name of every secret -// referenced by the pod spec. 
If visitor returns false, visiting is short-circuited. -// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. -// Returns true if visiting completed, false if visiting was short-circuited. -func VisitPodSecretNames(pod *api.Pod, visitor Visitor) bool { - for _, reference := range pod.Spec.ImagePullSecrets { - if !visitor(reference.Name) { - return false - } - } - for i := range pod.Spec.InitContainers { - if !visitContainerSecretNames(&pod.Spec.InitContainers[i], visitor) { - return false - } - } - for i := range pod.Spec.Containers { - if !visitContainerSecretNames(&pod.Spec.Containers[i], visitor) { - return false - } - } - var source *api.VolumeSource - for i := range pod.Spec.Volumes { - source = &pod.Spec.Volumes[i].VolumeSource - switch { - case source.AzureFile != nil: - if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) { - return false - } - case source.CephFS != nil: - if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) { - return false - } - case source.Cinder != nil: - if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) { - return false - } - case source.FlexVolume != nil: - if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) { - return false - } - case source.Projected != nil: - for j := range source.Projected.Sources { - if source.Projected.Sources[j].Secret != nil { - if !visitor(source.Projected.Sources[j].Secret.Name) { - return false - } - } - } - case source.RBD != nil: - if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) { - return false - } - case source.Secret != nil: - if !visitor(source.Secret.SecretName) { - return false - } - case source.ScaleIO != nil: - if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) { - return false - } - case source.ISCSI != nil: - if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) { - return false - } - case source.StorageOS != nil: - if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) { - return false - } - } - } - return true -} - -func visitContainerSecretNames(container *api.Container, visitor Visitor) bool { - for _, env := range container.EnvFrom { - if env.SecretRef != nil { - if !visitor(env.SecretRef.Name) { - return false - } - } - } - for _, envVar := range container.Env { - if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil { - if !visitor(envVar.ValueFrom.SecretKeyRef.Name) { - return false - } - } - } - return true -} - -// VisitPodConfigmapNames invokes the visitor function with the name of every configmap -// referenced by the pod spec. If visitor returns false, visiting is short-circuited. -// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. -// Returns true if visiting completed, false if visiting was short-circuited. 
-func VisitPodConfigmapNames(pod *api.Pod, visitor Visitor) bool { - for i := range pod.Spec.InitContainers { - if !visitContainerConfigmapNames(&pod.Spec.InitContainers[i], visitor) { - return false - } - } - for i := range pod.Spec.Containers { - if !visitContainerConfigmapNames(&pod.Spec.Containers[i], visitor) { - return false - } - } - var source *api.VolumeSource - for i := range pod.Spec.Volumes { - source = &pod.Spec.Volumes[i].VolumeSource - switch { - case source.Projected != nil: - for j := range source.Projected.Sources { - if source.Projected.Sources[j].ConfigMap != nil { - if !visitor(source.Projected.Sources[j].ConfigMap.Name) { - return false - } - } - } - case source.ConfigMap != nil: - if !visitor(source.ConfigMap.Name) { - return false - } - } - } - return true -} - -func visitContainerConfigmapNames(container *api.Container, visitor Visitor) bool { - for _, env := range container.EnvFrom { - if env.ConfigMapRef != nil { - if !visitor(env.ConfigMapRef.Name) { - return false - } - } - } - for _, envVar := range container.Env { - if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil { - if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) { - return false - } - } - } - return true -} - -// IsPodReady returns true if a pod is ready; false otherwise. -func IsPodReady(pod *api.Pod) bool { - return IsPodReadyConditionTrue(pod.Status) -} - -// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. -func IsPodReadyConditionTrue(status api.PodStatus) bool { - condition := GetPodReadyCondition(status) - return condition != nil && condition.Status == api.ConditionTrue -} - -// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. -// Returns nil if the condition is not present. -func GetPodReadyCondition(status api.PodStatus) *api.PodCondition { - _, condition := GetPodCondition(&status, api.PodReady) - return condition -} - -// GetPodCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func GetPodCondition(status *api.PodStatus, conditionType api.PodConditionType) (int, *api.PodCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the -// status has changed. -// Returns true if pod condition has changed or has been added. -func UpdatePodCondition(status *api.PodStatus, condition *api.PodCondition) bool { - condition.LastTransitionTime = metav1.Now() - // Try to find this pod condition. - conditionIndex, oldCondition := GetPodCondition(status, condition.Type) - - if oldCondition == nil { - // We are adding new pod condition. - status.Conditions = append(status.Conditions, *condition) - return true - } - // We are updating an existing condition, so we need to check if it has changed. 
- if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } - - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) - - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual -} - -// DropDisabledAlphaFields removes disabled fields from the pod spec. -// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a pod spec. -func DropDisabledAlphaFields(podSpec *api.PodSpec) { - if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) { - podSpec.Priority = nil - podSpec.PriorityClassName = "" - } - - if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { - for i := range podSpec.Volumes { - if podSpec.Volumes[i].EmptyDir != nil { - podSpec.Volumes[i].EmptyDir.SizeLimit = nil - } - } - } - - for i := range podSpec.Containers { - DropDisabledVolumeMountsAlphaFields(podSpec.Containers[i].VolumeMounts) - } - for i := range podSpec.InitContainers { - DropDisabledVolumeMountsAlphaFields(podSpec.InitContainers[i].VolumeMounts) - } - - DropDisabledVolumeDevicesAlphaFields(podSpec) - - DropDisabledRunAsGroupField(podSpec) - - if !utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && podSpec.RuntimeClassName != nil { - podSpec.RuntimeClassName = nil - } - - DropDisabledProcMountField(podSpec) -} - -// DropDisabledRunAsGroupField removes disabled fields from PodSpec related -// to RunAsGroup -func DropDisabledRunAsGroupField(podSpec *api.PodSpec) { - if !utilfeature.DefaultFeatureGate.Enabled(features.RunAsGroup) { - if podSpec.SecurityContext != nil { - podSpec.SecurityContext.RunAsGroup = nil - } - for i := range podSpec.Containers { - if podSpec.Containers[i].SecurityContext != nil { - podSpec.Containers[i].SecurityContext.RunAsGroup = nil - } - } - for i := range podSpec.InitContainers { - if podSpec.InitContainers[i].SecurityContext != nil { - podSpec.InitContainers[i].SecurityContext.RunAsGroup = nil - } - } - } -} - -// DropDisabledProcMountField removes disabled fields from PodSpec related -// to ProcMount -func DropDisabledProcMountField(podSpec *api.PodSpec) { - if !utilfeature.DefaultFeatureGate.Enabled(features.ProcMountType) { - defProcMount := api.DefaultProcMount - for i := range podSpec.Containers { - if podSpec.Containers[i].SecurityContext != nil { - podSpec.Containers[i].SecurityContext.ProcMount = &defProcMount - } - } - for i := range podSpec.InitContainers { - if podSpec.InitContainers[i].SecurityContext != nil { - podSpec.InitContainers[i].SecurityContext.ProcMount = &defProcMount - } - } - } -} - -// DropDisabledVolumeMountsAlphaFields removes disabled fields from []VolumeMount. -// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a VolumeMount -func DropDisabledVolumeMountsAlphaFields(volumeMounts []api.VolumeMount) { - if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) { - for i := range volumeMounts { - volumeMounts[i].MountPropagation = nil - } - } -} - -// DropDisabledVolumeDevicesAlphaFields removes disabled fields from []VolumeDevice. 
-// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a VolumeDevice -func DropDisabledVolumeDevicesAlphaFields(podSpec *api.PodSpec) { - if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - for i := range podSpec.Containers { - podSpec.Containers[i].VolumeDevices = nil - } - for i := range podSpec.InitContainers { - podSpec.InitContainers[i].VolumeDevices = nil - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD.bazel deleted file mode 100644 index a8656bf0b6ce8..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["ref.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/api/ref", - importpath = "k8s.io/kubernetes/pkg/api/ref", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go b/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go deleted file mode 100644 index d6576750ad115..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ref - -import ( - "errors" - "fmt" - "net/url" - "strings" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/apis/core" -) - -var ( - // Errors that could be returned by GetReference. - ErrNilObject = errors.New("can't reference a nil object") - ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") -) - -// GetReference returns an ObjectReference which refers to the given -// object, or an error if the object doesn't follow the conventions -// that would allow this. -// TODO: should take a meta.Interface see http://issue.k8s.io/7127 -func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*api.ObjectReference, error) { - if obj == nil { - return nil, ErrNilObject - } - if ref, ok := obj.(*api.ObjectReference); ok { - // Don't make a reference to a reference. 
- return ref, nil - } - - gvk := obj.GetObjectKind().GroupVersionKind() - - // if the object referenced is actually persisted, we can just get kind from meta - // if we are building an object reference to something not yet persisted, we should fallback to scheme - kind := gvk.Kind - if len(kind) == 0 { - // TODO: this is wrong - gvks, _, err := scheme.ObjectKinds(obj) - if err != nil { - return nil, err - } - kind = gvks[0].Kind - } - - // An object that implements only List has enough metadata to build a reference - var listMeta metav1.Common - objectMeta, err := meta.Accessor(obj) - if err != nil { - listMeta, err = meta.CommonAccessor(obj) - if err != nil { - return nil, err - } - } else { - listMeta = objectMeta - } - - // if the object referenced is actually persisted, we can also get version from meta - version := gvk.GroupVersion().String() - if len(version) == 0 { - selfLink := listMeta.GetSelfLink() - if len(selfLink) == 0 { - return nil, ErrNoSelfLink - } - selfLinkUrl, err := url.Parse(selfLink) - if err != nil { - return nil, err - } - // example paths: ///* - parts := strings.Split(selfLinkUrl.Path, "/") - if len(parts) < 3 { - return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) - } - version = parts[2] - } - - // only has list metadata - if objectMeta == nil { - return &api.ObjectReference{ - Kind: kind, - APIVersion: version, - ResourceVersion: listMeta.GetResourceVersion(), - }, nil - } - - return &api.ObjectReference{ - Kind: kind, - APIVersion: version, - Name: objectMeta.GetName(), - Namespace: objectMeta.GetNamespace(), - UID: objectMeta.GetUID(), - ResourceVersion: objectMeta.GetResourceVersion(), - }, nil -} - -// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. -func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*api.ObjectReference, error) { - ref, err := GetReference(scheme, obj) - if err != nil { - return nil, err - } - ref.FieldPath = fieldPath - return ref, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD.bazel deleted file mode 100644 index 43ffa1c8e5f76..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/api/resource", - importpath = "k8s.io/kubernetes/pkg/api/resource", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go b/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go index 416221d520e57..558e8a48c1a45 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go @@ -238,13 +238,13 @@ func IsPodReady(pod *v1.Pod) bool { return IsPodReadyConditionTrue(pod.Status) } -// IsPodReady returns true if a pod is ready; false otherwise. +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. func IsPodReadyConditionTrue(status v1.PodStatus) bool { condition := GetPodReadyCondition(status) return condition != nil && condition.Status == v1.ConditionTrue } -// Extracts the pod ready condition from the given status and returns that. 
+// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. // Returns nil if the condition is not present. func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { _, condition := GetPodCondition(&status, v1.PodReady) @@ -274,7 +274,7 @@ func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodC return -1, nil } -// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the // status has changed. // Returns true if pod condition has changed or has been added. func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { @@ -286,20 +286,19 @@ func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { // We are adding new pod condition. status.Conditions = append(status.Conditions, *condition) return true - } else { - // We are updating an existing condition, so we need to check if it has changed. - if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } + } + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual - } + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go deleted file mode 100644 index 6f7c59f587d44..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package resource - -import ( - "fmt" - "math" - "strconv" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" -) - -// addResourceList adds the resources in newList to list -func addResourceList(list, new v1.ResourceList) { - for name, quantity := range new { - if value, ok := list[name]; !ok { - list[name] = *quantity.Copy() - } else { - value.Add(quantity) - list[name] = value - } - } -} - -// maxResourceList sets list to the greater of list/newList for every resource -// either list -func maxResourceList(list, new v1.ResourceList) { - for name, quantity := range new { - if value, ok := list[name]; !ok { - list[name] = *quantity.Copy() - continue - } else { - if quantity.Cmp(value) > 0 { - list[name] = *quantity.Copy() - } - } - } -} - -// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all -// containers of the pod. -func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) { - reqs, limits = v1.ResourceList{}, v1.ResourceList{} - for _, container := range pod.Spec.Containers { - addResourceList(reqs, container.Resources.Requests) - addResourceList(limits, container.Resources.Limits) - } - // init containers define the minimum of any resource - for _, container := range pod.Spec.InitContainers { - maxResourceList(reqs, container.Resources.Requests) - maxResourceList(limits, container.Resources.Limits) - } - return -} - -// finds and returns the request for a specific resource. -func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 { - if resource == v1.ResourcePods { - return 1 - } - totalResources := int64(0) - for _, container := range pod.Spec.Containers { - if rQuantity, ok := container.Resources.Requests[resource]; ok { - if resource == v1.ResourceCPU { - totalResources += rQuantity.MilliValue() - } else { - totalResources += rQuantity.Value() - } - } - } - // take max_resource(sum_pod, any_init_container) - for _, container := range pod.Spec.InitContainers { - if rQuantity, ok := container.Resources.Requests[resource]; ok { - if resource == v1.ResourceCPU && rQuantity.MilliValue() > totalResources { - totalResources = rQuantity.MilliValue() - } else if rQuantity.Value() > totalResources { - totalResources = rQuantity.Value() - } - } - } - return totalResources -} - -// ExtractResourceValueByContainerName extracts the value of a resource -// by providing container name -func ExtractResourceValueByContainerName(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string) (string, error) { - container, err := findContainerInPod(pod, containerName) - if err != nil { - return "", err - } - return ExtractContainerResourceValue(fs, container) -} - -// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource -// by providing container name and node allocatable -func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string, nodeAllocatable v1.ResourceList) (string, error) { - realContainer, err := findContainerInPod(pod, containerName) - if err != nil { - return "", err - } - - container := realContainer.DeepCopy() - - MergeContainerResourceLimits(container, nodeAllocatable) - - return ExtractContainerResourceValue(fs, container) -} - -// ExtractContainerResourceValue extracts the value of a resource -// in an already known container -func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.Container) (string, error) { - divisor := resource.Quantity{} - if divisor.Cmp(fs.Divisor) == 0 { - 
divisor = resource.MustParse("1") - } else { - divisor = fs.Divisor - } - - switch fs.Resource { - case "limits.cpu": - return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) - case "limits.memory": - return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) - case "limits.ephemeral-storage": - return convertResourceEphemeralStorageToString(container.Resources.Limits.StorageEphemeral(), divisor) - case "requests.cpu": - return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) - case "requests.memory": - return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) - case "requests.ephemeral-storage": - return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor) - } - - return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) -} - -// convertResourceCPUToString converts cpu value to the format of divisor and returns -// ceiling of the value. -func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { - c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) - return strconv.FormatInt(c, 10), nil -} - -// convertResourceMemoryToString converts memory value to the format of divisor and returns -// ceiling of the value. -func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { - m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) - return strconv.FormatInt(m, 10), nil -} - -// convertResourceEphemeralStorageToString converts ephemeral storage value to the format of divisor and returns -// ceiling of the value. -func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity, divisor resource.Quantity) (string, error) { - m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value()))) - return strconv.FormatInt(m, 10), nil -} - -// findContainerInPod finds a container by its name in the provided pod -func findContainerInPod(pod *v1.Pod, containerName string) (*v1.Container, error) { - for _, container := range pod.Spec.Containers { - if container.Name == containerName { - return &container, nil - } - } - return nil, fmt.Errorf("container %s not found", containerName) -} - -// MergeContainerResourceLimits checks if a limit is applied for -// the container, and if not, it sets the limit to the passed resource list. -func MergeContainerResourceLimits(container *v1.Container, - allocatable v1.ResourceList) { - if container.Resources.Limits == nil { - container.Resources.Limits = make(v1.ResourceList) - } - for _, resource := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory, v1.ResourceEphemeralStorage} { - if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() { - if cap, exists := allocatable[resource]; exists { - container.Resources.Limits[resource] = *cap.Copy() - } - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go deleted file mode 100644 index 7b76bb29b24c2..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package - -// Package admissionregistration is the internal version of the API. -// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the -// new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io -package admissionregistration // import "k8s.io/kubernetes/pkg/apis/admissionregistration" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD.bazel deleted file mode 100644 index 2709afdb87fef..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install", - importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/install", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/admissionregistration:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/install.go deleted file mode 100644 index 65a38b2fb3408..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/install.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package install - -import ( - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/admissionregistration" - "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1" - "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1" -) - -func init() { - Install(legacyscheme.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(scheme *runtime.Scheme) { - utilruntime.Must(admissionregistration.AddToScheme(scheme)) - utilruntime.Must(v1alpha1.AddToScheme(scheme)) - utilruntime.Must(v1beta1.AddToScheme(scheme)) - utilruntime.Must(scheme.SetVersionPriority(v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/register.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/register.go deleted file mode 100644 index 941259ce6283a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/register.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admissionregistration - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const GroupName = "admissionregistration.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &InitializerConfiguration{}, - &InitializerConfigurationList{}, - &ValidatingWebhookConfiguration{}, - &ValidatingWebhookConfigurationList{}, - &MutatingWebhookConfiguration{}, - &MutatingWebhookConfigurationList{}, - ) - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go deleted file mode 100644 index 511bdd96d1c5f..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go +++ /dev/null @@ -1,350 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admissionregistration - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InitializerConfiguration describes the configuration of initializers. -type InitializerConfiguration struct { - metav1.TypeMeta - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta - - // Initializers is a list of resources and their default initializers - // Order-sensitive. - // When merging multiple InitializerConfigurations, we sort the initializers - // from different InitializerConfigurations by the name of the - // InitializerConfigurations; the order of the initializers from the same - // InitializerConfiguration is preserved. - // +optional - Initializers []Initializer -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InitializerConfigurationList is a list of InitializerConfiguration. -type InitializerConfigurationList struct { - metav1.TypeMeta - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta - - // List of InitializerConfiguration. - Items []InitializerConfiguration -} - -// Initializer describes the name and the failure policy of an initializer, and -// what resources it applies to. -type Initializer struct { - // Name is the identifier of the initializer. It will be added to the - // object that needs to be initialized. - // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where - // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required - Name string - - // Rules describes what resources/subresources the initializer cares about. - // The initializer cares about an operation if it matches _any_ Rule. - // Rule.Resources must not include subresources. - Rules []Rule -} - -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. -type Rule struct { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - APIGroups []string - - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - APIVersions []string - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. 
- Resources []string -} - -type FailurePolicyType string - -const ( - // Ignore means that an error calling the webhook is ignored. - Ignore FailurePolicyType = "Ignore" - // Fail means that an error calling the webhook causes the admission to fail. - Fail FailurePolicyType = "Fail" -) - -type SideEffectClass string - -const ( - // SideEffectClassUnknown means that no information is known about the side effects of calling the webhook. - // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. - SideEffectClassUnknown SideEffectClass = "Unknown" - // SideEffectClassNone means that calling the webhook will have no side effects. - SideEffectClassNone SideEffectClass = "None" - // SideEffectClassSome means that calling the webhook will possibly have side effects. - // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. - SideEffectClassSome SideEffectClass = "Some" - // SideEffectClassNoneOnDryRun means that calling the webhook will possibly have side effects, but if the - // request being reviewed has the dry-run attribute, the side effects will be suppressed. - SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects and object without changing it. -type ValidatingWebhookConfiguration struct { - metav1.TypeMeta - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta - // Webhooks is a list of webhooks and the affected resources and operations. - // +optional - Webhooks []Webhook -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. -type ValidatingWebhookConfigurationList struct { - metav1.TypeMeta - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta - // List of ValidatingWebhookConfigurations. - Items []ValidatingWebhookConfiguration -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. -type MutatingWebhookConfiguration struct { - metav1.TypeMeta - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta - // Webhooks is a list of webhooks and the affected resources and operations. - // +optional - Webhooks []Webhook -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. -type MutatingWebhookConfigurationList struct { - metav1.TypeMeta - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta - // List of MutatingWebhookConfiguration. - Items []MutatingWebhookConfiguration -} - -// Webhook describes an admission webhook and the resources and operations it applies to. -type Webhook struct { - // The name of the admission webhook. 
- // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - Name string - - // ClientConfig defines how to communicate with the hook. - // Required - ClientConfig WebhookClientConfig - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - Rules []RuleWithOperations - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - FailurePolicy *FailurePolicyType - - // NamespaceSelector decides whether to run the webhook on an object based - // on whether the namespace for that object matches the selector. If the - // object itself is a namespace, the matching is performed on - // object.metadata.labels. If the object is another cluster scoped resource, - // it never skips the webhook. - // - // For example, to run the webhook on any objects whose namespace is not - // associated with "runlevel" of "0" or "1"; you will set the selector as - // follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "runlevel", - // "operator": "NotIn", - // "values": [ - // "0", - // "1" - // ] - // } - // ] - // } - // - // If instead you want to only run the webhook on any objects whose - // namespace is associated with the "environment" of "prod" or "staging"; - // you will set the selector as follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "environment", - // "operator": "In", - // "values": [ - // "prod", - // "staging" - // ] - // } - // ] - // } - // - // See - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - // for more examples of label selectors. - // - // Default to the empty LabelSelector, which matches everything. - // +optional - NamespaceSelector *metav1.LabelSelector - - // SideEffects states whether this webhookk has side effects. - // Acceptable values are: Unknown, None, Some, NoneOnDryRun - // Webhooks with side effects MUST implement a reconciliation system, since a request may be - // rejected by a future step in the admission change and the side effects therefore need to be undone. - // Requests with the dryRun attribute will be auto-rejected if they match a webhook with - // sideEffects == Unknown or Some. Defaults to Unknown. - // +optional - SideEffects *SideEffectClass -} - -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -type RuleWithOperations struct { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * - // for all operations. - // If '*' is present, the length of the slice must be one. - // Required. - Operations []OperationType - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - Rule -} - -type OperationType string - -// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. 
-const ( - OperationAll OperationType = "*" - Create OperationType = "CREATE" - Update OperationType = "UPDATE" - Delete OperationType = "DELETE" - Connect OperationType = "CONNECT" -) - -// WebhookClientConfig contains the information to make a TLS -// connection with the webhook -type WebhookClientConfig struct { - // `url` gives the location of the webhook, in standard URL form - // (`[scheme://]host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. - // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - URL *string - - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // Port 443 will be used if it is open, otherwise it is an error. - // - // +optional - Service *ServiceReference - - // `caBundle` is a PEM encoded CA bundle which will be used to validate - // the webhook's server certificate. - // Required. - CABundle []byte -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -type ServiceReference struct { - // `namespace` is the namespace of the service. - // Required - Namespace string - // `name` is the name of the service. - // Required - Name string - - // `path` is an optional URL path which will be sent in any request to - // this service. 
- // +optional - Path *string -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel deleted file mode 100644 index 2349a1a9a6d5b..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "register.go", - "zz_generated.conversion.go", - "zz_generated.defaults.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", - importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/admissionregistration:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go deleted file mode 100644 index 43fd23dabf628..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/admissionregistration -// +k8s:conversion-gen-external-types=k8s.io/api/admissionregistration/v1alpha1 -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admissionregistration/v1alpha1 - -// Package v1alpha1 is the v1alpha1 version of the API. -// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the -// new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io -package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/register.go deleted file mode 100644 index f66c796afa293..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/register.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const GroupName = "admissionregistration.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - localSchemeBuilder = &admissionregistrationv1alpha1.SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addDefaultingFuncs) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index 3eb891669d726..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,170 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - unsafe "unsafe" - - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1alpha1.Initializer)(nil), (*admissionregistration.Initializer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_Initializer_To_admissionregistration_Initializer(a.(*v1alpha1.Initializer), b.(*admissionregistration.Initializer), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.Initializer)(nil), (*v1alpha1.Initializer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_Initializer_To_v1alpha1_Initializer(a.(*admissionregistration.Initializer), b.(*v1alpha1.Initializer), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.InitializerConfiguration)(nil), (*admissionregistration.InitializerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_InitializerConfiguration_To_admissionregistration_InitializerConfiguration(a.(*v1alpha1.InitializerConfiguration), b.(*admissionregistration.InitializerConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.InitializerConfiguration)(nil), (*v1alpha1.InitializerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_InitializerConfiguration_To_v1alpha1_InitializerConfiguration(a.(*admissionregistration.InitializerConfiguration), b.(*v1alpha1.InitializerConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.InitializerConfigurationList)(nil), (*admissionregistration.InitializerConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_InitializerConfigurationList_To_admissionregistration_InitializerConfigurationList(a.(*v1alpha1.InitializerConfigurationList), b.(*admissionregistration.InitializerConfigurationList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.InitializerConfigurationList)(nil), (*v1alpha1.InitializerConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_InitializerConfigurationList_To_v1alpha1_InitializerConfigurationList(a.(*admissionregistration.InitializerConfigurationList), b.(*v1alpha1.InitializerConfigurationList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.Rule)(nil), (*admissionregistration.Rule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_Rule_To_admissionregistration_Rule(a.(*v1alpha1.Rule), b.(*admissionregistration.Rule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.Rule)(nil), (*v1alpha1.Rule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_Rule_To_v1alpha1_Rule(a.(*admissionregistration.Rule), b.(*v1alpha1.Rule), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_Initializer_To_admissionregistration_Initializer(in *v1alpha1.Initializer, out *admissionregistration.Initializer, s conversion.Scope) error { - out.Name = in.Name - out.Rules = *(*[]admissionregistration.Rule)(unsafe.Pointer(&in.Rules)) - return nil -} - -// Convert_v1alpha1_Initializer_To_admissionregistration_Initializer is an autogenerated 
conversion function. -func Convert_v1alpha1_Initializer_To_admissionregistration_Initializer(in *v1alpha1.Initializer, out *admissionregistration.Initializer, s conversion.Scope) error { - return autoConvert_v1alpha1_Initializer_To_admissionregistration_Initializer(in, out, s) -} - -func autoConvert_admissionregistration_Initializer_To_v1alpha1_Initializer(in *admissionregistration.Initializer, out *v1alpha1.Initializer, s conversion.Scope) error { - out.Name = in.Name - out.Rules = *(*[]v1alpha1.Rule)(unsafe.Pointer(&in.Rules)) - return nil -} - -// Convert_admissionregistration_Initializer_To_v1alpha1_Initializer is an autogenerated conversion function. -func Convert_admissionregistration_Initializer_To_v1alpha1_Initializer(in *admissionregistration.Initializer, out *v1alpha1.Initializer, s conversion.Scope) error { - return autoConvert_admissionregistration_Initializer_To_v1alpha1_Initializer(in, out, s) -} - -func autoConvert_v1alpha1_InitializerConfiguration_To_admissionregistration_InitializerConfiguration(in *v1alpha1.InitializerConfiguration, out *admissionregistration.InitializerConfiguration, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Initializers = *(*[]admissionregistration.Initializer)(unsafe.Pointer(&in.Initializers)) - return nil -} - -// Convert_v1alpha1_InitializerConfiguration_To_admissionregistration_InitializerConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_InitializerConfiguration_To_admissionregistration_InitializerConfiguration(in *v1alpha1.InitializerConfiguration, out *admissionregistration.InitializerConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_InitializerConfiguration_To_admissionregistration_InitializerConfiguration(in, out, s) -} - -func autoConvert_admissionregistration_InitializerConfiguration_To_v1alpha1_InitializerConfiguration(in *admissionregistration.InitializerConfiguration, out *v1alpha1.InitializerConfiguration, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Initializers = *(*[]v1alpha1.Initializer)(unsafe.Pointer(&in.Initializers)) - return nil -} - -// Convert_admissionregistration_InitializerConfiguration_To_v1alpha1_InitializerConfiguration is an autogenerated conversion function. -func Convert_admissionregistration_InitializerConfiguration_To_v1alpha1_InitializerConfiguration(in *admissionregistration.InitializerConfiguration, out *v1alpha1.InitializerConfiguration, s conversion.Scope) error { - return autoConvert_admissionregistration_InitializerConfiguration_To_v1alpha1_InitializerConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_InitializerConfigurationList_To_admissionregistration_InitializerConfigurationList(in *v1alpha1.InitializerConfigurationList, out *admissionregistration.InitializerConfigurationList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]admissionregistration.InitializerConfiguration)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_InitializerConfigurationList_To_admissionregistration_InitializerConfigurationList is an autogenerated conversion function. 
-func Convert_v1alpha1_InitializerConfigurationList_To_admissionregistration_InitializerConfigurationList(in *v1alpha1.InitializerConfigurationList, out *admissionregistration.InitializerConfigurationList, s conversion.Scope) error { - return autoConvert_v1alpha1_InitializerConfigurationList_To_admissionregistration_InitializerConfigurationList(in, out, s) -} - -func autoConvert_admissionregistration_InitializerConfigurationList_To_v1alpha1_InitializerConfigurationList(in *admissionregistration.InitializerConfigurationList, out *v1alpha1.InitializerConfigurationList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha1.InitializerConfiguration)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_admissionregistration_InitializerConfigurationList_To_v1alpha1_InitializerConfigurationList is an autogenerated conversion function. -func Convert_admissionregistration_InitializerConfigurationList_To_v1alpha1_InitializerConfigurationList(in *admissionregistration.InitializerConfigurationList, out *v1alpha1.InitializerConfigurationList, s conversion.Scope) error { - return autoConvert_admissionregistration_InitializerConfigurationList_To_v1alpha1_InitializerConfigurationList(in, out, s) -} - -func autoConvert_v1alpha1_Rule_To_admissionregistration_Rule(in *v1alpha1.Rule, out *admissionregistration.Rule, s conversion.Scope) error { - out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) - out.APIVersions = *(*[]string)(unsafe.Pointer(&in.APIVersions)) - out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_v1alpha1_Rule_To_admissionregistration_Rule is an autogenerated conversion function. -func Convert_v1alpha1_Rule_To_admissionregistration_Rule(in *v1alpha1.Rule, out *admissionregistration.Rule, s conversion.Scope) error { - return autoConvert_v1alpha1_Rule_To_admissionregistration_Rule(in, out, s) -} - -func autoConvert_admissionregistration_Rule_To_v1alpha1_Rule(in *admissionregistration.Rule, out *v1alpha1.Rule, s conversion.Scope) error { - out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) - out.APIVersions = *(*[]string)(unsafe.Pointer(&in.APIVersions)) - out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_admissionregistration_Rule_To_v1alpha1_Rule is an autogenerated conversion function. 
-func Convert_admissionregistration_Rule_To_v1alpha1_Rule(in *admissionregistration.Rule, out *v1alpha1.Rule, s conversion.Scope) error { - return autoConvert_admissionregistration_Rule_To_v1alpha1_Rule(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/BUILD.bazel deleted file mode 100644 index 36cd9cb34db24..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "register.go", - "zz_generated.conversion.go", - "zz_generated.defaults.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1", - importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/admissionregistration:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/defaults.go deleted file mode 100644 index fa35267624d0c..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/defaults.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func SetDefaults_Webhook(obj *admissionregistrationv1beta1.Webhook) { - if obj.FailurePolicy == nil { - policy := admissionregistrationv1beta1.Ignore - obj.FailurePolicy = &policy - } - if obj.NamespaceSelector == nil { - selector := metav1.LabelSelector{} - obj.NamespaceSelector = &selector - } - if obj.SideEffects == nil { - // TODO: revisit/remove this default and possibly make the field required when promoting to v1 - unknown := admissionregistrationv1beta1.SideEffectClassUnknown - obj.SideEffects = &unknown - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/doc.go deleted file mode 100644 index cf03718ed26af..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/admissionregistration -// +k8s:conversion-gen-external-types=k8s.io/api/admissionregistration/v1beta1 -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admissionregistration/v1beta1 - -// Package v1beta1 is the v1beta1 version of the API. -// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the -// new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io -package v1beta1 // import "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 3d1d1be71777e..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,351 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1beta1 - -import ( - unsafe "unsafe" - - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1beta1.MutatingWebhookConfiguration)(nil), (*admissionregistration.MutatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(a.(*v1beta1.MutatingWebhookConfiguration), b.(*admissionregistration.MutatingWebhookConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingWebhookConfiguration)(nil), (*v1beta1.MutatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(a.(*admissionregistration.MutatingWebhookConfiguration), b.(*v1beta1.MutatingWebhookConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.MutatingWebhookConfigurationList)(nil), (*admissionregistration.MutatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(a.(*v1beta1.MutatingWebhookConfigurationList), b.(*admissionregistration.MutatingWebhookConfigurationList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingWebhookConfigurationList)(nil), (*v1beta1.MutatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(a.(*admissionregistration.MutatingWebhookConfigurationList), b.(*v1beta1.MutatingWebhookConfigurationList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.Rule)(nil), (*admissionregistration.Rule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_Rule_To_admissionregistration_Rule(a.(*v1beta1.Rule), b.(*admissionregistration.Rule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.Rule)(nil), (*v1beta1.Rule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_Rule_To_v1beta1_Rule(a.(*admissionregistration.Rule), b.(*v1beta1.Rule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.RuleWithOperations)(nil), (*admissionregistration.RuleWithOperations)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RuleWithOperations_To_admissionregistration_RuleWithOperations(a.(*v1beta1.RuleWithOperations), b.(*admissionregistration.RuleWithOperations), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.RuleWithOperations)(nil), (*v1beta1.RuleWithOperations)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_RuleWithOperations_To_v1beta1_RuleWithOperations(a.(*admissionregistration.RuleWithOperations), b.(*v1beta1.RuleWithOperations), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.ServiceReference)(nil), (*admissionregistration.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(a.(*v1beta1.ServiceReference), b.(*admissionregistration.ServiceReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.ServiceReference)(nil), (*v1beta1.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(a.(*admissionregistration.ServiceReference), b.(*v1beta1.ServiceReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.ValidatingWebhookConfiguration)(nil), (*admissionregistration.ValidatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(a.(*v1beta1.ValidatingWebhookConfiguration), b.(*admissionregistration.ValidatingWebhookConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingWebhookConfiguration)(nil), (*v1beta1.ValidatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(a.(*admissionregistration.ValidatingWebhookConfiguration), b.(*v1beta1.ValidatingWebhookConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.ValidatingWebhookConfigurationList)(nil), (*admissionregistration.ValidatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(a.(*v1beta1.ValidatingWebhookConfigurationList), b.(*admissionregistration.ValidatingWebhookConfigurationList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingWebhookConfigurationList)(nil), (*v1beta1.ValidatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(a.(*admissionregistration.ValidatingWebhookConfigurationList), b.(*v1beta1.ValidatingWebhookConfigurationList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.Webhook)(nil), (*admissionregistration.Webhook)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_Webhook_To_admissionregistration_Webhook(a.(*v1beta1.Webhook), b.(*admissionregistration.Webhook), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*admissionregistration.Webhook)(nil), (*v1beta1.Webhook)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_Webhook_To_v1beta1_Webhook(a.(*admissionregistration.Webhook), b.(*v1beta1.Webhook), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.WebhookClientConfig)(nil), (*admissionregistration.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(a.(*v1beta1.WebhookClientConfig), b.(*admissionregistration.WebhookClientConfig), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*admissionregistration.WebhookClientConfig)(nil), (*v1beta1.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(a.(*admissionregistration.WebhookClientConfig), b.(*v1beta1.WebhookClientConfig), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in *v1beta1.MutatingWebhookConfiguration, out *admissionregistration.MutatingWebhookConfiguration, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Webhooks = *(*[]admissionregistration.Webhook)(unsafe.Pointer(&in.Webhooks)) - return nil -} - -// Convert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration is an autogenerated conversion function. -func Convert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in *v1beta1.MutatingWebhookConfiguration, out *admissionregistration.MutatingWebhookConfiguration, s conversion.Scope) error { - return autoConvert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in, out, s) -} - -func autoConvert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(in *admissionregistration.MutatingWebhookConfiguration, out *v1beta1.MutatingWebhookConfiguration, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Webhooks = *(*[]v1beta1.Webhook)(unsafe.Pointer(&in.Webhooks)) - return nil -} - -// Convert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration is an autogenerated conversion function. -func Convert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(in *admissionregistration.MutatingWebhookConfiguration, out *v1beta1.MutatingWebhookConfiguration, s conversion.Scope) error { - return autoConvert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(in, out, s) -} - -func autoConvert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in *v1beta1.MutatingWebhookConfigurationList, out *admissionregistration.MutatingWebhookConfigurationList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]admissionregistration.MutatingWebhookConfiguration)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList is an autogenerated conversion function. 
-func Convert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in *v1beta1.MutatingWebhookConfigurationList, out *admissionregistration.MutatingWebhookConfigurationList, s conversion.Scope) error { - return autoConvert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in, out, s) -} - -func autoConvert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(in *admissionregistration.MutatingWebhookConfigurationList, out *v1beta1.MutatingWebhookConfigurationList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1beta1.MutatingWebhookConfiguration)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList is an autogenerated conversion function. -func Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(in *admissionregistration.MutatingWebhookConfigurationList, out *v1beta1.MutatingWebhookConfigurationList, s conversion.Scope) error { - return autoConvert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(in, out, s) -} - -func autoConvert_v1beta1_Rule_To_admissionregistration_Rule(in *v1beta1.Rule, out *admissionregistration.Rule, s conversion.Scope) error { - out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) - out.APIVersions = *(*[]string)(unsafe.Pointer(&in.APIVersions)) - out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_v1beta1_Rule_To_admissionregistration_Rule is an autogenerated conversion function. -func Convert_v1beta1_Rule_To_admissionregistration_Rule(in *v1beta1.Rule, out *admissionregistration.Rule, s conversion.Scope) error { - return autoConvert_v1beta1_Rule_To_admissionregistration_Rule(in, out, s) -} - -func autoConvert_admissionregistration_Rule_To_v1beta1_Rule(in *admissionregistration.Rule, out *v1beta1.Rule, s conversion.Scope) error { - out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) - out.APIVersions = *(*[]string)(unsafe.Pointer(&in.APIVersions)) - out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_admissionregistration_Rule_To_v1beta1_Rule is an autogenerated conversion function. -func Convert_admissionregistration_Rule_To_v1beta1_Rule(in *admissionregistration.Rule, out *v1beta1.Rule, s conversion.Scope) error { - return autoConvert_admissionregistration_Rule_To_v1beta1_Rule(in, out, s) -} - -func autoConvert_v1beta1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in *v1beta1.RuleWithOperations, out *admissionregistration.RuleWithOperations, s conversion.Scope) error { - out.Operations = *(*[]admissionregistration.OperationType)(unsafe.Pointer(&in.Operations)) - if err := Convert_v1beta1_Rule_To_admissionregistration_Rule(&in.Rule, &out.Rule, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta1_RuleWithOperations_To_admissionregistration_RuleWithOperations is an autogenerated conversion function. 
-func Convert_v1beta1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in *v1beta1.RuleWithOperations, out *admissionregistration.RuleWithOperations, s conversion.Scope) error { - return autoConvert_v1beta1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in, out, s) -} - -func autoConvert_admissionregistration_RuleWithOperations_To_v1beta1_RuleWithOperations(in *admissionregistration.RuleWithOperations, out *v1beta1.RuleWithOperations, s conversion.Scope) error { - out.Operations = *(*[]v1beta1.OperationType)(unsafe.Pointer(&in.Operations)) - if err := Convert_admissionregistration_Rule_To_v1beta1_Rule(&in.Rule, &out.Rule, s); err != nil { - return err - } - return nil -} - -// Convert_admissionregistration_RuleWithOperations_To_v1beta1_RuleWithOperations is an autogenerated conversion function. -func Convert_admissionregistration_RuleWithOperations_To_v1beta1_RuleWithOperations(in *admissionregistration.RuleWithOperations, out *v1beta1.RuleWithOperations, s conversion.Scope) error { - return autoConvert_admissionregistration_RuleWithOperations_To_v1beta1_RuleWithOperations(in, out, s) -} - -func autoConvert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(in *v1beta1.ServiceReference, out *admissionregistration.ServiceReference, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Name = in.Name - out.Path = (*string)(unsafe.Pointer(in.Path)) - return nil -} - -// Convert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference is an autogenerated conversion function. -func Convert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(in *v1beta1.ServiceReference, out *admissionregistration.ServiceReference, s conversion.Scope) error { - return autoConvert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(in, out, s) -} - -func autoConvert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(in *admissionregistration.ServiceReference, out *v1beta1.ServiceReference, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Name = in.Name - out.Path = (*string)(unsafe.Pointer(in.Path)) - return nil -} - -// Convert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference is an autogenerated conversion function. -func Convert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(in *admissionregistration.ServiceReference, out *v1beta1.ServiceReference, s conversion.Scope) error { - return autoConvert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(in, out, s) -} - -func autoConvert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in *v1beta1.ValidatingWebhookConfiguration, out *admissionregistration.ValidatingWebhookConfiguration, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Webhooks = *(*[]admissionregistration.Webhook)(unsafe.Pointer(&in.Webhooks)) - return nil -} - -// Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration is an autogenerated conversion function. 
-func Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in *v1beta1.ValidatingWebhookConfiguration, out *admissionregistration.ValidatingWebhookConfiguration, s conversion.Scope) error { - return autoConvert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in, out, s) -} - -func autoConvert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(in *admissionregistration.ValidatingWebhookConfiguration, out *v1beta1.ValidatingWebhookConfiguration, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Webhooks = *(*[]v1beta1.Webhook)(unsafe.Pointer(&in.Webhooks)) - return nil -} - -// Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration is an autogenerated conversion function. -func Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(in *admissionregistration.ValidatingWebhookConfiguration, out *v1beta1.ValidatingWebhookConfiguration, s conversion.Scope) error { - return autoConvert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(in, out, s) -} - -func autoConvert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in *v1beta1.ValidatingWebhookConfigurationList, out *admissionregistration.ValidatingWebhookConfigurationList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]admissionregistration.ValidatingWebhookConfiguration)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList is an autogenerated conversion function. -func Convert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in *v1beta1.ValidatingWebhookConfigurationList, out *admissionregistration.ValidatingWebhookConfigurationList, s conversion.Scope) error { - return autoConvert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in, out, s) -} - -func autoConvert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(in *admissionregistration.ValidatingWebhookConfigurationList, out *v1beta1.ValidatingWebhookConfigurationList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1beta1.ValidatingWebhookConfiguration)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList is an autogenerated conversion function. 
-func Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(in *admissionregistration.ValidatingWebhookConfigurationList, out *v1beta1.ValidatingWebhookConfigurationList, s conversion.Scope) error { - return autoConvert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(in, out, s) -} - -func autoConvert_v1beta1_Webhook_To_admissionregistration_Webhook(in *v1beta1.Webhook, out *admissionregistration.Webhook, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil { - return err - } - out.Rules = *(*[]admissionregistration.RuleWithOperations)(unsafe.Pointer(&in.Rules)) - out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy)) - out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) - out.SideEffects = (*admissionregistration.SideEffectClass)(unsafe.Pointer(in.SideEffects)) - return nil -} - -// Convert_v1beta1_Webhook_To_admissionregistration_Webhook is an autogenerated conversion function. -func Convert_v1beta1_Webhook_To_admissionregistration_Webhook(in *v1beta1.Webhook, out *admissionregistration.Webhook, s conversion.Scope) error { - return autoConvert_v1beta1_Webhook_To_admissionregistration_Webhook(in, out, s) -} - -func autoConvert_admissionregistration_Webhook_To_v1beta1_Webhook(in *admissionregistration.Webhook, out *v1beta1.Webhook, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil { - return err - } - out.Rules = *(*[]v1beta1.RuleWithOperations)(unsafe.Pointer(&in.Rules)) - out.FailurePolicy = (*v1beta1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy)) - out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) - out.SideEffects = (*v1beta1.SideEffectClass)(unsafe.Pointer(in.SideEffects)) - return nil -} - -// Convert_admissionregistration_Webhook_To_v1beta1_Webhook is an autogenerated conversion function. -func Convert_admissionregistration_Webhook_To_v1beta1_Webhook(in *admissionregistration.Webhook, out *v1beta1.Webhook, s conversion.Scope) error { - return autoConvert_admissionregistration_Webhook_To_v1beta1_Webhook(in, out, s) -} - -func autoConvert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in *v1beta1.WebhookClientConfig, out *admissionregistration.WebhookClientConfig, s conversion.Scope) error { - out.URL = (*string)(unsafe.Pointer(in.URL)) - out.Service = (*admissionregistration.ServiceReference)(unsafe.Pointer(in.Service)) - out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) - return nil -} - -// Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig is an autogenerated conversion function. 
-func Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in *v1beta1.WebhookClientConfig, out *admissionregistration.WebhookClientConfig, s conversion.Scope) error { - return autoConvert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in, out, s) -} - -func autoConvert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *admissionregistration.WebhookClientConfig, out *v1beta1.WebhookClientConfig, s conversion.Scope) error { - out.URL = (*string)(unsafe.Pointer(in.URL)) - out.Service = (*v1beta1.ServiceReference)(unsafe.Pointer(in.Service)) - out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) - return nil -} - -// Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig is an autogenerated conversion function. -func Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *admissionregistration.WebhookClientConfig, out *v1beta1.WebhookClientConfig, s conversion.Scope) error { - return autoConvert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go deleted file mode 100644 index bf559d6ab9e95..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&v1beta1.MutatingWebhookConfiguration{}, func(obj interface{}) { - SetObjectDefaults_MutatingWebhookConfiguration(obj.(*v1beta1.MutatingWebhookConfiguration)) - }) - scheme.AddTypeDefaultingFunc(&v1beta1.MutatingWebhookConfigurationList{}, func(obj interface{}) { - SetObjectDefaults_MutatingWebhookConfigurationList(obj.(*v1beta1.MutatingWebhookConfigurationList)) - }) - scheme.AddTypeDefaultingFunc(&v1beta1.ValidatingWebhookConfiguration{}, func(obj interface{}) { - SetObjectDefaults_ValidatingWebhookConfiguration(obj.(*v1beta1.ValidatingWebhookConfiguration)) - }) - scheme.AddTypeDefaultingFunc(&v1beta1.ValidatingWebhookConfigurationList{}, func(obj interface{}) { - SetObjectDefaults_ValidatingWebhookConfigurationList(obj.(*v1beta1.ValidatingWebhookConfigurationList)) - }) - return nil -} - -func SetObjectDefaults_MutatingWebhookConfiguration(in *v1beta1.MutatingWebhookConfiguration) { - for i := range in.Webhooks { - a := &in.Webhooks[i] - SetDefaults_Webhook(a) - } -} - -func SetObjectDefaults_MutatingWebhookConfigurationList(in *v1beta1.MutatingWebhookConfigurationList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_MutatingWebhookConfiguration(a) - } -} - -func SetObjectDefaults_ValidatingWebhookConfiguration(in *v1beta1.ValidatingWebhookConfiguration) { - for i := range in.Webhooks { - a := &in.Webhooks[i] - SetDefaults_Webhook(a) - } -} - -func SetObjectDefaults_ValidatingWebhookConfigurationList(in *v1beta1.ValidatingWebhookConfigurationList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ValidatingWebhookConfiguration(a) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/zz_generated.deepcopy.go deleted file mode 100644 index 4d86d15899da5..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/zz_generated.deepcopy.go +++ /dev/null @@ -1,391 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package admissionregistration - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializer) DeepCopyInto(out *Initializer) { - *out = *in - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]Rule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. 
-func (in *Initializer) DeepCopy() *Initializer { - if in == nil { - return nil - } - out := new(Initializer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - *out = make([]Initializer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfiguration. -func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration { - if in == nil { - return nil - } - out := new(InitializerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]InitializerConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfigurationList. -func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList { - if in == nil { - return nil - } - out := new(InitializerConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Webhooks != nil { - in, out := &in.Webhooks, &out.Webhooks - *out = make([]Webhook, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. -func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { - if in == nil { - return nil - } - out := new(MutatingWebhookConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MutatingWebhookConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. -func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { - if in == nil { - return nil - } - out := new(MutatingWebhookConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Rule) DeepCopyInto(out *Rule) { - *out = *in - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIVersions != nil { - in, out := &in.APIVersions, &out.APIVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. -func (in *Rule) DeepCopy() *Rule { - if in == nil { - return nil - } - out := new(Rule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { - *out = *in - if in.Operations != nil { - in, out := &in.Operations, &out.Operations - *out = make([]OperationType, len(*in)) - copy(*out, *in) - } - in.Rule.DeepCopyInto(&out.Rule) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. -func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { - if in == nil { - return nil - } - out := new(RuleWithOperations) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Webhooks != nil { - in, out := &in.Webhooks, &out.Webhooks - *out = make([]Webhook, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. -func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { - if in == nil { - return nil - } - out := new(ValidatingWebhookConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ValidatingWebhookConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. -func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { - if in == nil { - return nil - } - out := new(ValidatingWebhookConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Webhook) DeepCopyInto(out *Webhook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - *out = new(FailurePolicyType) - **out = **in - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.SideEffects != nil { - in, out := &in.SideEffects, &out.SideEffects - *out = new(SideEffectClass) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. -func (in *Webhook) DeepCopy() *Webhook { - if in == nil { - return nil - } - out := new(Webhook) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { - *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(ServiceReference) - (*in).DeepCopyInto(*out) - } - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. -func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { - if in == nil { - return nil - } - out := new(WebhookClientConfig) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD.bazel index be0980d390616..1a06fb08254dc 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD.bazel @@ -15,8 +15,8 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go index 8b7591807cbe4..b56ec96cb36c6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/extensions" ) var ( @@ -48,18 +47,18 @@ func Resource(resource string) schema.GroupResource { func addKnownTypes(scheme *runtime.Scheme) error { // TODO this will get cleaned up with the scheme types are fixed scheme.AddKnownTypes(SchemeGroupVersion, - &extensions.DaemonSet{}, - &extensions.DaemonSetList{}, - &extensions.Deployment{}, - &extensions.DeploymentList{}, - &extensions.DeploymentRollback{}, + &DaemonSet{}, + &DaemonSetList{}, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, &autoscaling.Scale{}, &StatefulSet{}, &StatefulSetList{}, &ControllerRevision{}, &ControllerRevisionList{}, - &extensions.ReplicaSet{}, - &extensions.ReplicaSetList{}, + &ReplicaSet{}, + &ReplicaSetList{}, ) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go index 850694d7fc23e..c15927d45ec94 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go @@ -19,12 +19,11 @@ package apps import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" api "k8s.io/kubernetes/pkg/apis/core" ) // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StatefulSet represents a set of pods with 
consistent identities. @@ -262,3 +261,541 @@ type ControllerRevisionList struct { // Items is the list of ControllerRevision objects. Items []ControllerRevision } + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Deployment struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus +} + +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas int32 + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector + + // Template describes the pods that will be created. + Template api.PodTemplateSpec + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // This is set to the max value of int32 (i.e. 2147483647) by default, which means + // "retaining all old ReplicaSets". + // +optional + RevisionHistoryLimit *int32 + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + Paused bool + + // DEPRECATED. + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. This is set to + // the max value of int32 (i.e. 2147483647) by default, which means "no deadline". + // +optional + ProgressDeadlineSeconds *int32 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED. +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta + // Required: This must match the Name of a deployment. + Name string + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string + // The config of this deployment rollback. + RollbackTo RollbackConfig +} + +// DEPRECATED. +type RollbackConfig struct { + // The revision to rollback to. If set to 0, rollback to the last revision. + // +optional + Revision int64 +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +type DeploymentStrategy struct { + // Type of deployment. 
Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that at least 70% of original number of pods are available at all times + // during the update. + // +optional + MaxUnavailable intstr.IntOrString + + // The maximum number of pods that can be scheduled above the original number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of total pods at + // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of original pods. + // +optional + MaxSurge intstr.IntOrString +} + +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + UnavailableReplicas int32 + + // Represents the latest available observations of a deployment's current state. + Conditions []DeploymentCondition + + // Count of hash collisions for the Deployment. 
The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // The last time this condition was updated. + LastUpdateTime metav1.Time + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. + Message string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DeploymentList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of deployments. + Items []Deployment +} + +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + Type DaemonSetUpdateStrategyType + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. 
The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable intstr.IntOrString +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + Template api.PodTemplateSpec + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 + + // DEPRECATED. + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + TemplateGeneration int64 + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + CurrentNumberScheduled int32 + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + NumberMisscheduled int32 + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + DesiredNumberScheduled int32 + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 + + // The most recent generation observed by the daemon set controller. + // +optional + ObservedGeneration int64 + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 + + // Count of hash collisions for the DaemonSet. 
The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int32 + + // Represents the latest available observations of a DaemonSet's current state. + Conditions []DaemonSetCondition +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. + Message string +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus +} + +const ( + // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // A list of daemon sets. + Items []DaemonSet +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this ReplicaSet. + // +optional + Spec ReplicaSetSpec + + // Status is the current status of this ReplicaSet. This data may be + // out of date by some window of time. + // +optional + Status ReplicaSetStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicaSet +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +// As the internal representation of a ReplicaSet, it must have +// a Template set. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. 
+ Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the replica count. + // Must match in order to be controlled. + // If empty, defaulted to labels on pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // +optional + Template api.PodTemplateSpec +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replica set's current state. + // +optional + Conditions []ReplicaSetCondition +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. +type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + // +optional + Reason string + // A human readable message indicating details about the transition. 
+ // +optional + Message string +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD.bazel index 9817e4a9b33eb..c54ee3e16985a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD.bazel @@ -24,6 +24,5 @@ go_library( "//vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core/v1:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go index 565430b0e9607..34f6078b467df 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go @@ -29,7 +29,6 @@ import ( "k8s.io/kubernetes/pkg/apis/apps" api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/apis/extensions" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -42,29 +41,29 @@ func addConversionFuncs(scheme *runtime.Scheme) error { Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec, Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy, Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy, - Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet, - Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet, + Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet, Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus, Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus, - Convert_v1_Deployment_To_extensions_Deployment, - Convert_extensions_Deployment_To_v1_Deployment, - Convert_extensions_DaemonSet_To_v1_DaemonSet, - Convert_v1_DaemonSet_To_extensions_DaemonSet, - Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec, - Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec, - Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy, - Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy, + Convert_v1_Deployment_To_apps_Deployment, + Convert_apps_Deployment_To_v1_Deployment, + Convert_apps_DaemonSet_To_v1_DaemonSet, + Convert_v1_DaemonSet_To_apps_DaemonSet, + Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec, + Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec, + Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy, + Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy, // extensions // TODO: below conversions should be dropped in favor of auto-generated // ones, see https://github.com/kubernetes/kubernetes/issues/39865 - Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec, - Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy, - Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, - Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec, - Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + Convert_v1_DeploymentSpec_To_apps_DeploymentSpec, + Convert_apps_DeploymentSpec_To_v1_DeploymentSpec, + Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy, + 
Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy, + Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, + Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, + Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec, + Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec, ) if err != nil { return err @@ -72,7 +71,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return nil } -func Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -80,7 +79,7 @@ func Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1.Deploymen if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.RevisionHistoryLimit = in.RevisionHistoryLimit @@ -93,13 +92,13 @@ func Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1.Deploymen return nil } -func Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.DeploymentSpec, out *appsv1.DeploymentSpec, s conversion.Scope) error { +func Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -115,11 +114,11 @@ func Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.Deplo return nil } -func Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *appsv1.DeploymentStrategy, s conversion.Scope) error { +func Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1.DeploymentStrategy, s conversion.Scope) error { out.Type = appsv1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(appsv1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -128,11 +127,11 @@ func Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(in *extensio return nil } -func Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *appsv1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) 
if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = new(apps.RollingUpdateDeployment) + if err := Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -141,7 +140,7 @@ func Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *appsv1.D return nil } -func Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *appsv1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -151,7 +150,7 @@ func Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in return nil } -func Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *appsv1.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1.RollingUpdateDeployment, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -167,9 +166,9 @@ func Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in return nil } -func Convert_v1_Deployment_To_extensions_Deployment(in *appsv1.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func Convert_v1_Deployment_To_apps_Deployment(in *appsv1.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -179,7 +178,7 @@ func Convert_v1_Deployment_To_extensions_Deployment(in *appsv1.Deployment, out * if revision64, err := strconv.ParseInt(revision, 10, 64); err != nil { return fmt.Errorf("failed to parse annotation[%s]=%s as int64: %v", appsv1.DeprecatedRollbackTo, revision, err) } else { - out.Spec.RollbackTo = new(extensions.RollbackConfig) + out.Spec.RollbackTo = new(apps.RollbackConfig) out.Spec.RollbackTo.Revision = revision64 } out.Annotations = deepCopyStringMap(out.Annotations) @@ -188,17 +187,17 @@ func Convert_v1_Deployment_To_extensions_Deployment(in *appsv1.Deployment, out * out.Spec.RollbackTo = nil } - if err := Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_extensions_Deployment_To_v1_Deployment(in *extensions.Deployment, out *appsv1.Deployment, s conversion.Scope) error { +func Convert_apps_Deployment_To_v1_Deployment(in *apps.Deployment, out *appsv1.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Annotations = deepCopyStringMap(out.Annotations) // deep copy because we modify it below - if err := Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := 
Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -213,13 +212,13 @@ func Convert_extensions_Deployment_To_v1_Deployment(in *extensions.Deployment, o delete(out.Annotations, appsv1.DeprecatedRollbackTo) } - if err := Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *appsv1.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *appsv1.RollingUpdateDaemonSet, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -229,19 +228,19 @@ func Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in * return nil } -func Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *appsv1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *appsv1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } return nil } -func Convert_extensions_DaemonSet_To_v1_DaemonSet(in *extensions.DaemonSet, out *appsv1.DaemonSet, s conversion.Scope) error { +func Convert_apps_DaemonSet_To_v1_DaemonSet(in *apps.DaemonSet, out *appsv1.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Annotations = deepCopyStringMap(out.Annotations) // deep copy annotations because we change them below out.Annotations[appsv1.DeprecatedTemplateGeneration] = strconv.FormatInt(in.Spec.TemplateGeneration, 10) - if err := Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := s.Convert(&in.Status, &out.Status, 0); err != nil { @@ -250,12 +249,12 @@ func Convert_extensions_DaemonSet_To_v1_DaemonSet(in *extensions.DaemonSet, out return nil } -func Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *appsv1.DaemonSetSpec, s conversion.Scope) error { +func Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(in *apps.DaemonSetSpec, out *appsv1.DaemonSetSpec, s conversion.Scope) error { out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = int32(in.MinReadySeconds) @@ -268,20 +267,20 @@ func Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(in *extensions.DaemonS return nil } -func Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *appsv1.DaemonSetUpdateStrategy, s conversion.Scope) error { +func Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, 
out *appsv1.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = appsv1.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = &appsv1.RollingUpdateDaemonSet{} - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } return nil } -func Convert_v1_DaemonSet_To_extensions_DaemonSet(in *appsv1.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { +func Convert_v1_DaemonSet_To_apps_DaemonSet(in *appsv1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } if value, ok := in.Annotations[appsv1.DeprecatedTemplateGeneration]; ok { @@ -299,12 +298,12 @@ func Convert_v1_DaemonSet_To_extensions_DaemonSet(in *appsv1.DaemonSet, out *ext return nil } -func Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *appsv1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { +func Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(in *appsv1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { out.Selector = in.Selector if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -317,18 +316,18 @@ func Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *appsv1.DaemonSetSp return nil } -func Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *appsv1.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) +func Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *appsv1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = &extensions.RollingUpdateDaemonSet{} - if err := Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = &apps.RollingUpdateDaemonSet{} + if err := Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } return nil } -func Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *appsv1.ReplicaSetSpec, s conversion.Scope) error { +func Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *appsv1.ReplicaSetSpec, s conversion.Scope) error { out.Replicas = new(int32) *out.Replicas = int32(in.Replicas) out.MinReadySeconds = in.MinReadySeconds @@ -339,7 +338,7 @@ func Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *extensions.Repli return nil } -func Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *appsv1.ReplicaSetSpec, 
out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *appsv1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go index b70ddca6da8f9..7920560d2a3fb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions // +k8s:conversion-gen-external-types=k8s.io/api/apps/v1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go index dfeb85fe87d3d..0c0c636dd85d0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go @@ -31,7 +31,6 @@ import ( apps "k8s.io/kubernetes/pkg/apis/apps" core "k8s.io/kubernetes/pkg/apis/core" apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1" - extensions "k8s.io/kubernetes/pkg/apis/extensions" ) func init() { @@ -61,193 +60,193 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSet)(nil), (*extensions.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSet_To_extensions_DaemonSet(a.(*v1.DaemonSet), b.(*extensions.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSet_To_apps_DaemonSet(a.(*v1.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSet)(nil), (*v1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSet_To_v1_DaemonSet(a.(*extensions.DaemonSet), b.(*v1.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSet)(nil), (*v1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSet_To_v1_DaemonSet(a.(*apps.DaemonSet), b.(*v1.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetCondition)(nil), (*extensions.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(a.(*v1.DaemonSetCondition), b.(*extensions.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*v1.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetCondition)(nil), (*v1.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(a.(*extensions.DaemonSetCondition), b.(*v1.DaemonSetCondition), scope) + if err := 
s.AddGeneratedConversionFunc((*apps.DaemonSetCondition)(nil), (*v1.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(a.(*apps.DaemonSetCondition), b.(*v1.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetList)(nil), (*extensions.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetList_To_extensions_DaemonSetList(a.(*v1.DaemonSetList), b.(*extensions.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSetList)(nil), (*apps.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetList_To_apps_DaemonSetList(a.(*v1.DaemonSetList), b.(*apps.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetList)(nil), (*v1.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetList_To_v1_DaemonSetList(a.(*extensions.DaemonSetList), b.(*v1.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetList)(nil), (*v1.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetList_To_v1_DaemonSetList(a.(*apps.DaemonSetList), b.(*v1.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetSpec)(nil), (*extensions.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*extensions.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetSpec)(nil), (*v1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(a.(*extensions.DaemonSetSpec), b.(*v1.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetSpec)(nil), (*v1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetStatus)(nil), (*extensions.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(a.(*v1.DaemonSetStatus), b.(*extensions.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*v1.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetStatus)(nil), (*v1.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(a.(*extensions.DaemonSetStatus), b.(*v1.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetStatus)(nil), 
(*v1.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(a.(*apps.DaemonSetStatus), b.(*v1.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetUpdateStrategy)(nil), (*extensions.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(a.(*v1.DaemonSetUpdateStrategy), b.(*extensions.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetUpdateStrategy)(nil), (*v1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(a.(*extensions.DaemonSetUpdateStrategy), b.(*v1.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Deployment_To_extensions_Deployment(a.(*v1.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*v1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Deployment_To_apps_Deployment(a.(*v1.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.Deployment)(nil), (*v1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1_Deployment(a.(*extensions.Deployment), b.(*v1.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*v1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1_Deployment(a.(*apps.Deployment), b.(*v1.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DeploymentCondition)(nil), (*extensions.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentCondition_To_extensions_DeploymentCondition(a.(*v1.DeploymentCondition), b.(*extensions.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentCondition_To_apps_DeploymentCondition(a.(*v1.DeploymentCondition), b.(*apps.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentCondition)(nil), (*v1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_extensions_DeploymentCondition_To_v1_DeploymentCondition(a.(*extensions.DeploymentCondition), b.(*v1.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*v1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentCondition_To_v1_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*v1.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DeploymentList)(nil), (*extensions.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentList_To_extensions_DeploymentList(a.(*v1.DeploymentList), b.(*extensions.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*v1.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentList_To_apps_DeploymentList(a.(*v1.DeploymentList), b.(*apps.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentList)(nil), (*v1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentList_To_v1_DeploymentList(a.(*extensions.DeploymentList), b.(*v1.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*v1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentList_To_v1_DeploymentList(a.(*apps.DeploymentList), b.(*v1.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentSpec)(nil), (*v1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1.DeploymentSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DeploymentStatus)(nil), (*extensions.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(a.(*v1.DeploymentStatus), b.(*extensions.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStatus)(nil), (*v1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) 
error { - return Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(a.(*extensions.DeploymentStatus), b.(*v1.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*v1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*v1.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*v1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicaSet)(nil), (*extensions.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSet_To_extensions_ReplicaSet(a.(*v1.ReplicaSet), b.(*extensions.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*v1.ReplicaSet)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSet_To_apps_ReplicaSet(a.(*v1.ReplicaSet), b.(*apps.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSet)(nil), (*v1.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSet_To_v1_ReplicaSet(a.(*extensions.ReplicaSet), b.(*v1.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSet)(nil), (*v1.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSet_To_v1_ReplicaSet(a.(*apps.ReplicaSet), b.(*v1.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetCondition)(nil), (*extensions.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(a.(*v1.ReplicaSetCondition), b.(*extensions.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetCondition)(nil), (*apps.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(a.(*v1.ReplicaSetCondition), b.(*apps.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetCondition)(nil), (*v1.ReplicaSetCondition)(nil), func(a, b 
interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(a.(*extensions.ReplicaSetCondition), b.(*v1.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetCondition)(nil), (*v1.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(a.(*apps.ReplicaSetCondition), b.(*v1.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetList)(nil), (*extensions.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetList_To_extensions_ReplicaSetList(a.(*v1.ReplicaSetList), b.(*extensions.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetList)(nil), (*apps.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSetList_To_apps_ReplicaSetList(a.(*v1.ReplicaSetList), b.(*apps.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetList)(nil), (*v1.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetList_To_v1_ReplicaSetList(a.(*extensions.ReplicaSetList), b.(*v1.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetList)(nil), (*v1.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetList_To_v1_ReplicaSetList(a.(*apps.ReplicaSetList), b.(*v1.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1.ReplicaSetSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1.ReplicaSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetStatus)(nil), (*extensions.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(a.(*v1.ReplicaSetStatus), b.(*extensions.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(a.(*v1.ReplicaSetStatus), b.(*apps.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetStatus)(nil), 
(*v1.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(a.(*extensions.ReplicaSetStatus), b.(*v1.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetStatus)(nil), (*v1.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(a.(*apps.ReplicaSetStatus), b.(*v1.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*v1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -321,108 +320,108 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1.StatefulSetSpec), scope) + if err := s.AddConversionFunc((*apps.DaemonSetSpec)(nil), (*v1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetStatus)(nil), (*v1.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*v1.StatefulSetStatus), scope) + if err := s.AddConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1.StatefulSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*apps.DaemonSet)(nil), (*v1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSet_To_v1_DaemonSet(a.(*apps.DaemonSet), b.(*v1.DaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSetSpec)(nil), (*v1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(a.(*extensions.DaemonSetSpec), b.(*v1.DaemonSetSpec), scope) + if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*v1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSetUpdateStrategy)(nil), (*v1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(a.(*extensions.DaemonSetUpdateStrategy), b.(*v1.DaemonSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSet)(nil), (*v1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSet_To_v1_DaemonSet(a.(*extensions.DaemonSet), b.(*v1.DaemonSet), scope) + if err := s.AddConversionFunc((*apps.Deployment)(nil), (*v1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1_Deployment(a.(*apps.Deployment), b.(*v1.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentSpec)(nil), (*v1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(a.(*extensions.DeploymentSpec), 
b.(*v1.DeploymentSpec), scope) + if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.Deployment)(nil), (*v1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1_Deployment(a.(*extensions.Deployment), b.(*v1.Deployment), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1.StatefulSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*apps.StatefulSetStatus)(nil), (*v1.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*v1.StatefulSetStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1.StatefulSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DaemonSetSpec)(nil), (*extensions.DaemonSetSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*extensions.DaemonSetSpec), scope) + if err := s.AddConversionFunc((*v1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DaemonSetUpdateStrategy)(nil), (*extensions.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(a.(*v1.DaemonSetUpdateStrategy), b.(*extensions.DaemonSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*v1.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DaemonSet)(nil), (*extensions.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSet_To_extensions_DaemonSet(a.(*v1.DaemonSet), b.(*extensions.DaemonSet), scope) + if err := s.AddConversionFunc((*v1.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSet_To_apps_DaemonSet(a.(*v1.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddConversionFunc((*v1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*v1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Deployment_To_extensions_Deployment(a.(*v1.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddConversionFunc((*v1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Deployment_To_apps_Deployment(a.(*v1.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1.ReplicaSetSpec), 
b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*v1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*v1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*v1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -514,30 +513,30 @@ func Convert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(in *apps.C return autoConvert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(in, out, s) } -func autoConvert_v1_DaemonSet_To_extensions_DaemonSet(in *v1.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { +func autoConvert_v1_DaemonSet_To_apps_DaemonSet(in *v1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_extensions_DaemonSet_To_v1_DaemonSet(in *extensions.DaemonSet, out *v1.DaemonSet, s conversion.Scope) error { +func autoConvert_apps_DaemonSet_To_v1_DaemonSet(in *apps.DaemonSet, out *v1.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - out.Type = extensions.DaemonSetConditionType(in.Type) +func 
autoConvert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + out.Type = apps.DaemonSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -545,12 +544,12 @@ func autoConvert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1.D return nil } -// Convert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition is an autogenerated conversion function. -func Convert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in, out, s) +// Convert_v1_DaemonSetCondition_To_apps_DaemonSetCondition is an autogenerated conversion function. +func Convert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(in, out, s) } -func autoConvert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1.DaemonSetCondition, s conversion.Scope) error { +func autoConvert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1.DaemonSetCondition, s conversion.Scope) error { out.Type = v1.DaemonSetConditionType(in.Type) out.Status = corev1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -559,18 +558,18 @@ func autoConvert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in *exte return nil } -// Convert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition is an autogenerated conversion function. -func Convert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in, out, s) +// Convert_apps_DaemonSetCondition_To_v1_DaemonSetCondition is an autogenerated conversion function. +func Convert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(in, out, s) } -func autoConvert_v1_DaemonSetList_To_extensions_DaemonSetList(in *v1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { +func autoConvert_v1_DaemonSetList_To_apps_DaemonSetList(in *v1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.DaemonSet, len(*in)) + *out = make([]apps.DaemonSet, len(*in)) for i := range *in { - if err := Convert_v1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1_DaemonSet_To_apps_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -580,18 +579,18 @@ func autoConvert_v1_DaemonSetList_To_extensions_DaemonSetList(in *v1.DaemonSetLi return nil } -// Convert_v1_DaemonSetList_To_extensions_DaemonSetList is an autogenerated conversion function. 
-func Convert_v1_DaemonSetList_To_extensions_DaemonSetList(in *v1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - return autoConvert_v1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) +// Convert_v1_DaemonSetList_To_apps_DaemonSetList is an autogenerated conversion function. +func Convert_v1_DaemonSetList_To_apps_DaemonSetList(in *v1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { + return autoConvert_v1_DaemonSetList_To_apps_DaemonSetList(in, out, s) } -func autoConvert_extensions_DaemonSetList_To_v1_DaemonSetList(in *extensions.DaemonSetList, out *v1.DaemonSetList, s conversion.Scope) error { +func autoConvert_apps_DaemonSetList_To_v1_DaemonSetList(in *apps.DaemonSetList, out *v1.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1.DaemonSet, len(*in)) for i := range *in { - if err := Convert_extensions_DaemonSet_To_v1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_DaemonSet_To_v1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -601,17 +600,17 @@ func autoConvert_extensions_DaemonSetList_To_v1_DaemonSetList(in *extensions.Dae return nil } -// Convert_extensions_DaemonSetList_To_v1_DaemonSetList is an autogenerated conversion function. -func Convert_extensions_DaemonSetList_To_v1_DaemonSetList(in *extensions.DaemonSetList, out *v1.DaemonSetList, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetList_To_v1_DaemonSetList(in, out, s) +// Convert_apps_DaemonSetList_To_v1_DaemonSetList is an autogenerated conversion function. +func Convert_apps_DaemonSetList_To_v1_DaemonSetList(in *apps.DaemonSetList, out *v1.DaemonSetList, s conversion.Scope) error { + return autoConvert_apps_DaemonSetList_To_v1_DaemonSetList(in, out, s) } -func autoConvert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(in *v1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := apiscorev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -619,12 +618,12 @@ func autoConvert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1.DaemonSetSp return nil } -func autoConvert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(in *apps.DaemonSetSpec, out *v1.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := apiscorev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } 
out.MinReadySeconds = in.MinReadySeconds @@ -633,7 +632,7 @@ func autoConvert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(in *extensions.Dae return nil } -func autoConvert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -643,16 +642,16 @@ func autoConvert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1.DaemonS out.NumberAvailable = in.NumberAvailable out.NumberUnavailable = in.NumberUnavailable out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) - out.Conditions = *(*[]extensions.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus is an autogenerated conversion function. -func Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) +// Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus is an autogenerated conversion function. +func Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(in, out, s) } -func autoConvert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -666,17 +665,17 @@ func autoConvert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in *extensions return nil } -// Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus is an autogenerated conversion function. -func Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in, out, s) +// Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus is an autogenerated conversion function. 
+func Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(in, out, s) } -func autoConvert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) +func autoConvert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *v1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDaemonSet) - if err := Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDaemonSet) + if err := Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -685,12 +684,12 @@ func autoConvert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrateg return nil } -func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1.DaemonSetUpdateStrategy, s conversion.Scope) error { +func autoConvert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = v1.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1.RollingUpdateDaemonSet) - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -699,30 +698,30 @@ func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrateg return nil } -func autoConvert_v1_Deployment_To_extensions_Deployment(in *v1.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func autoConvert_v1_Deployment_To_apps_Deployment(in *v1.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_extensions_Deployment_To_v1_Deployment(in *extensions.Deployment, out *v1.Deployment, s conversion.Scope) error { +func autoConvert_apps_Deployment_To_v1_Deployment(in *apps.Deployment, out *v1.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { 
return err } return nil } -func autoConvert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - out.Type = extensions.DeploymentConditionType(in.Type) +func autoConvert_v1_DeploymentCondition_To_apps_DeploymentCondition(in *v1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + out.Type = apps.DeploymentConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime @@ -731,12 +730,12 @@ func autoConvert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1 return nil } -// Convert_v1_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function. -func Convert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - return autoConvert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) +// Convert_v1_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function. +func Convert_v1_DeploymentCondition_To_apps_DeploymentCondition(in *v1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1_DeploymentCondition_To_apps_DeploymentCondition(in, out, s) } -func autoConvert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1.DeploymentCondition, s conversion.Scope) error { +func autoConvert_apps_DeploymentCondition_To_v1_DeploymentCondition(in *apps.DeploymentCondition, out *v1.DeploymentCondition, s conversion.Scope) error { out.Type = v1.DeploymentConditionType(in.Type) out.Status = corev1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime @@ -746,18 +745,18 @@ func autoConvert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in *ex return nil } -// Convert_extensions_DeploymentCondition_To_v1_DeploymentCondition is an autogenerated conversion function. -func Convert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1.DeploymentCondition, s conversion.Scope) error { - return autoConvert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in, out, s) +// Convert_apps_DeploymentCondition_To_v1_DeploymentCondition is an autogenerated conversion function. 
+func Convert_apps_DeploymentCondition_To_v1_DeploymentCondition(in *apps.DeploymentCondition, out *v1.DeploymentCondition, s conversion.Scope) error { + return autoConvert_apps_DeploymentCondition_To_v1_DeploymentCondition(in, out, s) } -func autoConvert_v1_DeploymentList_To_extensions_DeploymentList(in *v1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { +func autoConvert_v1_DeploymentList_To_apps_DeploymentList(in *v1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.Deployment, len(*in)) + *out = make([]apps.Deployment, len(*in)) for i := range *in { - if err := Convert_v1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -767,18 +766,18 @@ func autoConvert_v1_DeploymentList_To_extensions_DeploymentList(in *v1.Deploymen return nil } -// Convert_v1_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function. -func Convert_v1_DeploymentList_To_extensions_DeploymentList(in *v1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - return autoConvert_v1_DeploymentList_To_extensions_DeploymentList(in, out, s) +// Convert_v1_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function. +func Convert_v1_DeploymentList_To_apps_DeploymentList(in *v1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { + return autoConvert_v1_DeploymentList_To_apps_DeploymentList(in, out, s) } -func autoConvert_extensions_DeploymentList_To_v1_DeploymentList(in *extensions.DeploymentList, out *v1.DeploymentList, s conversion.Scope) error { +func autoConvert_apps_DeploymentList_To_v1_DeploymentList(in *apps.DeploymentList, out *v1.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1.Deployment, len(*in)) for i := range *in { - if err := Convert_extensions_Deployment_To_v1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_Deployment_To_v1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -788,12 +787,12 @@ func autoConvert_extensions_DeploymentList_To_v1_DeploymentList(in *extensions.D return nil } -// Convert_extensions_DeploymentList_To_v1_DeploymentList is an autogenerated conversion function. -func Convert_extensions_DeploymentList_To_v1_DeploymentList(in *extensions.DeploymentList, out *v1.DeploymentList, s conversion.Scope) error { - return autoConvert_extensions_DeploymentList_To_v1_DeploymentList(in, out, s) +// Convert_apps_DeploymentList_To_v1_DeploymentList is an autogenerated conversion function. 
+func Convert_apps_DeploymentList_To_v1_DeploymentList(in *apps.DeploymentList, out *v1.DeploymentList, s conversion.Scope) error { + return autoConvert_apps_DeploymentList_To_v1_DeploymentList(in, out, s) } -func autoConvert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func autoConvert_v1_DeploymentSpec_To_apps_DeploymentSpec(in *v1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -801,7 +800,7 @@ func autoConvert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1.Deploymen if err := apiscorev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -811,7 +810,7 @@ func autoConvert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1.Deploymen return nil } -func autoConvert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.DeploymentSpec, out *v1.DeploymentSpec, s conversion.Scope) error { +func autoConvert_apps_DeploymentSpec_To_v1_DeploymentSpec(in *apps.DeploymentSpec, out *v1.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -819,7 +818,7 @@ func autoConvert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.D if err := apiscorev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -830,24 +829,24 @@ func autoConvert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.D return nil } -func autoConvert_v1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { +func autoConvert_v1_DeploymentStatus_To_apps_DeploymentStatus(in *v1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas - out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) return nil } -// Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function. -func Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - return autoConvert_v1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +// Convert_v1_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function. 
+func Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(in *v1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { + return autoConvert_v1_DeploymentStatus_To_apps_DeploymentStatus(in, out, s) } -func autoConvert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1.DeploymentStatus, s conversion.Scope) error { +func autoConvert_apps_DeploymentStatus_To_v1_DeploymentStatus(in *apps.DeploymentStatus, out *v1.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas @@ -859,17 +858,17 @@ func autoConvert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in *extensio return nil } -// Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus is an autogenerated conversion function. -func Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1.DeploymentStatus, s conversion.Scope) error { - return autoConvert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in, out, s) +// Convert_apps_DeploymentStatus_To_v1_DeploymentStatus is an autogenerated conversion function. +func Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(in *apps.DeploymentStatus, out *v1.DeploymentStatus, s conversion.Scope) error { + return autoConvert_apps_DeploymentStatus_To_v1_DeploymentStatus(in, out, s) } -func autoConvert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func autoConvert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDeployment) - if err := Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDeployment) + if err := Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil { return err } } else { @@ -878,12 +877,12 @@ func autoConvert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1.D return nil } -func autoConvert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1.DeploymentStrategy, s conversion.Scope) error { +func autoConvert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1.DeploymentStrategy, s conversion.Scope) error { out.Type = v1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(*in, *out, s); err != nil { return err } } else { @@ -892,40 +891,40 @@ func autoConvert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(in *exte return nil } -func autoConvert_v1_ReplicaSet_To_extensions_ReplicaSet(in *v1.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { +func autoConvert_v1_ReplicaSet_To_apps_ReplicaSet(in *v1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := 
Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1_ReplicaSet_To_extensions_ReplicaSet is an autogenerated conversion function. -func Convert_v1_ReplicaSet_To_extensions_ReplicaSet(in *v1.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - return autoConvert_v1_ReplicaSet_To_extensions_ReplicaSet(in, out, s) +// Convert_v1_ReplicaSet_To_apps_ReplicaSet is an autogenerated conversion function. +func Convert_v1_ReplicaSet_To_apps_ReplicaSet(in *v1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { + return autoConvert_v1_ReplicaSet_To_apps_ReplicaSet(in, out, s) } -func autoConvert_extensions_ReplicaSet_To_v1_ReplicaSet(in *extensions.ReplicaSet, out *v1.ReplicaSet, s conversion.Scope) error { +func autoConvert_apps_ReplicaSet_To_v1_ReplicaSet(in *apps.ReplicaSet, out *v1.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_ReplicaSet_To_v1_ReplicaSet is an autogenerated conversion function. -func Convert_extensions_ReplicaSet_To_v1_ReplicaSet(in *extensions.ReplicaSet, out *v1.ReplicaSet, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSet_To_v1_ReplicaSet(in, out, s) +// Convert_apps_ReplicaSet_To_v1_ReplicaSet is an autogenerated conversion function. +func Convert_apps_ReplicaSet_To_v1_ReplicaSet(in *apps.ReplicaSet, out *v1.ReplicaSet, s conversion.Scope) error { + return autoConvert_apps_ReplicaSet_To_v1_ReplicaSet(in, out, s) } -func autoConvert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - out.Type = extensions.ReplicaSetConditionType(in.Type) +func autoConvert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error { + out.Type = apps.ReplicaSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -933,12 +932,12 @@ func autoConvert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1 return nil } -// Convert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition is an autogenerated conversion function. -func Convert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s) +// Convert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition is an autogenerated conversion function. 
+func Convert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in, out, s) } -func autoConvert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1.ReplicaSetCondition, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1.ReplicaSetCondition, s conversion.Scope) error { out.Type = v1.ReplicaSetConditionType(in.Type) out.Status = corev1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -947,18 +946,18 @@ func autoConvert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *ex return nil } -// Convert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition is an autogenerated conversion function. -func Convert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in, out, s) +// Convert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition is an autogenerated conversion function. +func Convert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(in, out, s) } -func autoConvert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { +func autoConvert_v1_ReplicaSetList_To_apps_ReplicaSetList(in *v1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.ReplicaSet, len(*in)) + *out = make([]apps.ReplicaSet, len(*in)) for i := range *in { - if err := Convert_v1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1_ReplicaSet_To_apps_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -968,18 +967,18 @@ func autoConvert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1.ReplicaSe return nil } -// Convert_v1_ReplicaSetList_To_extensions_ReplicaSetList is an autogenerated conversion function. -func Convert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { - return autoConvert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) +// Convert_v1_ReplicaSetList_To_apps_ReplicaSetList is an autogenerated conversion function. 
+func Convert_v1_ReplicaSetList_To_apps_ReplicaSetList(in *v1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error { + return autoConvert_v1_ReplicaSetList_To_apps_ReplicaSetList(in, out, s) } -func autoConvert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in *extensions.ReplicaSetList, out *v1.ReplicaSetList, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetList_To_v1_ReplicaSetList(in *apps.ReplicaSetList, out *v1.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1.ReplicaSet, len(*in)) for i := range *in { - if err := Convert_extensions_ReplicaSet_To_v1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_ReplicaSet_To_v1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -989,12 +988,12 @@ func autoConvert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in *extensions.R return nil } -// Convert_extensions_ReplicaSetList_To_v1_ReplicaSetList is an autogenerated conversion function. -func Convert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in *extensions.ReplicaSetList, out *v1.ReplicaSetList, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in, out, s) +// Convert_apps_ReplicaSetList_To_v1_ReplicaSetList is an autogenerated conversion function. +func Convert_apps_ReplicaSetList_To_v1_ReplicaSetList(in *apps.ReplicaSetList, out *v1.ReplicaSetList, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetList_To_v1_ReplicaSetList(in, out, s) } -func autoConvert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func autoConvert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *v1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1006,7 +1005,7 @@ func autoConvert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1.ReplicaSe return nil } -func autoConvert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *v1.ReplicaSetSpec, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *v1.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1018,22 +1017,22 @@ func autoConvert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *extensions.R return nil } -func autoConvert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { +func autoConvert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus is an autogenerated conversion function. 
-func Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) +// Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus is an autogenerated conversion function. +func Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in, out, s) } -func autoConvert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1.ReplicaSetStatus, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas @@ -1043,28 +1042,28 @@ func autoConvert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *extensio return nil } -// Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus is an autogenerated conversion function. -func Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in, out, s) +// Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus is an autogenerated conversion function. +func Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(in, out, s) } -func autoConvert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *v1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func autoConvert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *v1.RollingUpdateDaemonSet, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1.RollingUpdateDaemonSet, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } 
-func autoConvert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD.bazel index fca8f6a81604c..2ceadd325ad72 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD.bazel @@ -26,6 +26,5 @@ go_library( "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core/v1:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go index 680da870a9444..15804ad9bbf60 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go @@ -30,7 +30,6 @@ import ( "k8s.io/kubernetes/pkg/apis/autoscaling" api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/apis/extensions" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -48,12 +47,12 @@ func addConversionFuncs(scheme *runtime.Scheme) error { // ones, see https://github.com/kubernetes/kubernetes/issues/39865 Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus, Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec, + Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy, + Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, + Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, ) if err != nil { return err @@ -213,7 +212,7 @@ func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta1.Scal return nil } -func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -221,14 +220,14 @@ func 
Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta1 if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.RevisionHistoryLimit = in.RevisionHistoryLimit out.MinReadySeconds = in.MinReadySeconds out.Paused = in.Paused if in.RollbackTo != nil { - out.RollbackTo = new(extensions.RollbackConfig) + out.RollbackTo = new(apps.RollbackConfig) out.RollbackTo.Revision = in.RollbackTo.Revision } else { out.RollbackTo = nil @@ -240,13 +239,13 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta1 return nil } -func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *appsv1beta1.DeploymentSpec, s conversion.Scope) error { +func Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1beta1.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -268,11 +267,11 @@ func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions. 
return nil } -func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *appsv1beta1.DeploymentStrategy, s conversion.Scope) error { +func Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1beta1.DeploymentStrategy, s conversion.Scope) error { out.Type = appsv1beta1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(appsv1beta1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -281,11 +280,11 @@ func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *ext return nil } -func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *appsv1beta1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -294,7 +293,7 @@ func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *app return nil } -func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *appsv1beta1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -304,7 +303,7 @@ func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployme return nil } -func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *appsv1beta1.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1beta1.RollingUpdateDeployment, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go index d4672d4f3fde3..372a18ae8d4cf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go @@ -16,7 +16,6 @@ limitations under the License. 
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions // +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go index 3268019690a22..728db5da9235c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go @@ -32,7 +32,6 @@ import ( autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" core "k8s.io/kubernetes/pkg/apis/core" corev1 "k8s.io/kubernetes/pkg/apis/core/v1" - extensions "k8s.io/kubernetes/pkg/apis/extensions" ) func init() { @@ -62,93 +61,93 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_Deployment_To_extensions_Deployment(a.(*v1beta1.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Deployment_To_apps_Deployment(a.(*v1beta1.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.Deployment)(nil), (*v1beta1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1beta1_Deployment(a.(*extensions.Deployment), b.(*v1beta1.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*v1beta1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1beta1_Deployment(a.(*apps.Deployment), b.(*v1beta1.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentCondition)(nil), (*extensions.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(a.(*v1beta1.DeploymentCondition), b.(*extensions.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(a.(*v1beta1.DeploymentCondition), b.(*apps.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentCondition)(nil), (*v1beta1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(a.(*extensions.DeploymentCondition), b.(*v1beta1.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*v1beta1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*v1beta1.DeploymentCondition), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1beta1.DeploymentList)(nil), (*extensions.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(a.(*v1beta1.DeploymentList), b.(*extensions.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentList_To_apps_DeploymentList(a.(*v1beta1.DeploymentList), b.(*apps.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentList)(nil), (*v1beta1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(a.(*extensions.DeploymentList), b.(*v1beta1.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*v1beta1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentList_To_v1beta1_DeploymentList(a.(*apps.DeploymentList), b.(*v1beta1.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentRollback)(nil), (*extensions.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(a.(*v1beta1.DeploymentRollback), b.(*extensions.DeploymentRollback), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentRollback)(nil), (*apps.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(a.(*v1beta1.DeploymentRollback), b.(*apps.DeploymentRollback), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentRollback)(nil), (*v1beta1.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(a.(*extensions.DeploymentRollback), b.(*v1beta1.DeploymentRollback), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentRollback)(nil), (*v1beta1.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(a.(*apps.DeploymentRollback), b.(*v1beta1.DeploymentRollback), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, 
b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStatus)(nil), (*extensions.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(a.(*v1beta1.DeploymentStatus), b.(*extensions.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1beta1.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStatus)(nil), (*v1beta1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(a.(*extensions.DeploymentStatus), b.(*v1beta1.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*v1beta1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*v1beta1.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollbackConfig)(nil), (*extensions.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(a.(*v1beta1.RollbackConfig), b.(*extensions.RollbackConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.RollbackConfig)(nil), (*apps.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(a.(*v1beta1.RollbackConfig), b.(*apps.RollbackConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollbackConfig)(nil), (*v1beta1.RollbackConfig)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(a.(*extensions.RollbackConfig), b.(*v1beta1.RollbackConfig), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollbackConfig)(nil), (*v1beta1.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(a.(*apps.RollbackConfig), b.(*v1beta1.RollbackConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -252,48 +251,48 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta1.StatefulSetSpec), scope) + if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1beta1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1beta1.StatefulSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) + if err := 
s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) + if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta1.StatefulSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1beta1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1beta1.StatefulSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddConversionFunc((*v1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := 
s.AddConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -385,40 +384,40 @@ func Convert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList(in *a return autoConvert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList(in, out, s) } -func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *v1beta1.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func autoConvert_v1beta1_Deployment_To_apps_Deployment(in *v1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_Deployment_To_extensions_Deployment is an autogenerated conversion function. -func Convert_v1beta1_Deployment_To_extensions_Deployment(in *v1beta1.Deployment, out *extensions.Deployment, s conversion.Scope) error { - return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) +// Convert_v1beta1_Deployment_To_apps_Deployment is an autogenerated conversion function. +func Convert_v1beta1_Deployment_To_apps_Deployment(in *v1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error { + return autoConvert_v1beta1_Deployment_To_apps_Deployment(in, out, s) } -func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { +func autoConvert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_Deployment_To_v1beta1_Deployment is an autogenerated conversion function. -func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { - return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) +// Convert_apps_Deployment_To_v1beta1_Deployment is an autogenerated conversion function. 
+func Convert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { + return autoConvert_apps_Deployment_To_v1beta1_Deployment(in, out, s) } -func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - out.Type = extensions.DeploymentConditionType(in.Type) +func autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + out.Type = apps.DeploymentConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime @@ -427,12 +426,12 @@ func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(i return nil } -// Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function. -func Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) +// Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function. +func Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in, out, s) } -func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { +func autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { out.Type = v1beta1.DeploymentConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime @@ -442,18 +441,18 @@ func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(i return nil } -// Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function. -func Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { - return autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s) +// Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function. 
+func Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { + return autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s) } -func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in *v1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.Deployment, len(*in)) + *out = make([]apps.Deployment, len(*in)) for i := range *in { - if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta1_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -463,18 +462,18 @@ func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1 return nil } -// Convert_v1beta1_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function. -func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s) +// Convert_v1beta1_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function. +func Convert_v1beta1_DeploymentList_To_apps_DeploymentList(in *v1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in, out, s) } -func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { +func autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta1.Deployment, len(*in)) for i := range *in { - if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -484,40 +483,40 @@ func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensi return nil } -// Convert_extensions_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function. -func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { - return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s) +// Convert_apps_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function. 
+func Convert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { + return autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in, out, s) } -func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta1.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *v1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error { out.Name = in.Name out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) - if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { + if err := Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { return err } return nil } -// Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback is an autogenerated conversion function. -func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta1.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s) +// Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback is an autogenerated conversion function. +func Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *v1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in, out, s) } -func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { +func autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { out.Name = in.Name out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) - if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { + if err := Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { return err } return nil } -// Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function. -func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { - return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) +// Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function. 
+func Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { + return autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) } -func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *v1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -525,18 +524,18 @@ func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1 if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) out.Paused = in.Paused - out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo)) + out.RollbackTo = (*apps.RollbackConfig)(unsafe.Pointer(in.RollbackTo)) out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) return nil } -func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error { +func autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -544,7 +543,7 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensi if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -555,24 +554,24 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensi return nil } -func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas - out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) return nil } -// Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function. 
-func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +// Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function. +func Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in, out, s) } -func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { +func autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas @@ -584,17 +583,17 @@ func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *ext return nil } -// Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function. -func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { - return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) +// Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function. +func Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { + return autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) } -func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1beta1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func autoConvert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil { return err } } else { @@ -603,12 +602,12 @@ func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in return nil } -func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error { +func autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error { out.Type = v1beta1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1beta1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil 
{ return err } } else { @@ -617,33 +616,33 @@ func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in return nil } -func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *v1beta1.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { +func autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *v1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil } -// Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig is an autogenerated conversion function. -func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *v1beta1.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { - return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s) +// Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig is an autogenerated conversion function. +func Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *v1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error { + return autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in, out, s) } -func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { +func autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil } -// Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function. -func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { - return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) +// Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function. 
+func Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { + return autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) } -func autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD.bazel index 657202d22b6b3..4d12259eecb0d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD.bazel @@ -26,6 +26,5 @@ go_library( "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core/v1:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go index 3bfd5279392f8..8d0e753a35cad 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go @@ -31,7 +31,6 @@ import ( autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/apis/extensions" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -44,31 +43,31 @@ func addConversionFuncs(scheme *runtime.Scheme) error { Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec, Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy, Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy, - Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet, - Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet, + Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet, Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus, 
Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus, - Convert_v1beta2_Deployment_To_extensions_Deployment, - Convert_extensions_Deployment_To_v1beta2_Deployment, - Convert_extensions_DaemonSet_To_v1beta2_DaemonSet, - Convert_v1beta2_DaemonSet_To_extensions_DaemonSet, - Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec, - Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec, - Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy, - Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy, + Convert_v1beta2_Deployment_To_apps_Deployment, + Convert_apps_Deployment_To_v1beta2_Deployment, + Convert_apps_DaemonSet_To_v1beta2_DaemonSet, + Convert_v1beta2_DaemonSet_To_apps_DaemonSet, + Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec, + Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec, + Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy, + Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy, // extensions // TODO: below conversions should be dropped in favor of auto-generated // ones, see https://github.com/kubernetes/kubernetes/issues/39865 Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus, Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus, - Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec, - Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy, - Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment, - Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec, - Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec, + Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec, + Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy, + Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy, + Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, + Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment, + Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec, + Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec, ) if err != nil { return err @@ -91,7 +90,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return nil } -func Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *appsv1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *appsv1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -101,7 +100,7 @@ func Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet return nil } -func Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *appsv1beta2.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *appsv1beta2.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -294,7 +293,7 @@ func 
Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta2.Scal return nil } -func Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta2.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1beta2.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -302,7 +301,7 @@ func Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta2 if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.RevisionHistoryLimit = in.RevisionHistoryLimit @@ -315,13 +314,13 @@ func Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta2 return nil } -func Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensions.DeploymentSpec, out *appsv1beta2.DeploymentSpec, s conversion.Scope) error { +func Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1beta2.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -337,11 +336,11 @@ func Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensions. 
return nil } -func Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *extensions.DeploymentStrategy, out *appsv1beta2.DeploymentStrategy, s conversion.Scope) error { +func Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1beta2.DeploymentStrategy, s conversion.Scope) error { out.Type = appsv1beta2.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(appsv1beta2.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -350,11 +349,11 @@ func Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *ext return nil } -func Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(in *appsv1beta2.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1beta2.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -363,7 +362,7 @@ func Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(in *app return nil } -func Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *appsv1beta2.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1beta2.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -373,7 +372,7 @@ func Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployme return nil } -func Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *appsv1beta2.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1beta2.RollingUpdateDeployment, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -389,7 +388,7 @@ func Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployme return nil } -func Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *appsv1beta2.ReplicaSetSpec, s conversion.Scope) error { +func Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *appsv1beta2.ReplicaSetSpec, s conversion.Scope) error { out.Replicas = new(int32) *out.Replicas = int32(in.Replicas) out.MinReadySeconds = in.MinReadySeconds @@ -400,9 +399,9 @@ func 
Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensions. return nil } -func Convert_v1beta2_Deployment_To_extensions_Deployment(in *appsv1beta2.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func Convert_v1beta2_Deployment_To_apps_Deployment(in *appsv1beta2.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -412,7 +411,7 @@ func Convert_v1beta2_Deployment_To_extensions_Deployment(in *appsv1beta2.Deploym if revision64, err := strconv.ParseInt(revision, 10, 64); err != nil { return fmt.Errorf("failed to parse annotation[%s]=%s as int64: %v", appsv1beta2.DeprecatedRollbackTo, revision, err) } else { - out.Spec.RollbackTo = new(extensions.RollbackConfig) + out.Spec.RollbackTo = new(apps.RollbackConfig) out.Spec.RollbackTo.Revision = revision64 } out.Annotations = deepCopyStringMap(out.Annotations) @@ -421,13 +420,13 @@ func Convert_v1beta2_Deployment_To_extensions_Deployment(in *appsv1beta2.Deploym out.Spec.RollbackTo = nil } - if err := Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *appsv1beta2.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *appsv1beta2.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -439,11 +438,11 @@ func Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *appsv1beta2 return nil } -func Convert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployment, out *appsv1beta2.Deployment, s conversion.Scope) error { +func Convert_apps_Deployment_To_v1beta2_Deployment(in *apps.Deployment, out *appsv1beta2.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Annotations = deepCopyStringMap(out.Annotations) // deep copy because we modify annotations below - if err := Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -458,17 +457,17 @@ func Convert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployme delete(out.Annotations, appsv1beta2.DeprecatedRollbackTo) } - if err := Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(in *extensions.DaemonSet, out *appsv1beta2.DaemonSet, s conversion.Scope) error { +func Convert_apps_DaemonSet_To_v1beta2_DaemonSet(in *apps.DaemonSet, out *appsv1beta2.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Annotations = deepCopyStringMap(out.Annotations) out.Annotations[appsv1beta2.DeprecatedTemplateGeneration] = strconv.FormatInt(in.Spec.TemplateGeneration, 10) - if err := 
Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := s.Convert(&in.Status, &out.Status, 0); err != nil { @@ -477,12 +476,12 @@ func Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(in *extensions.DaemonSet, return nil } -func Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extensions.DaemonSetSpec, out *appsv1beta2.DaemonSetSpec, s conversion.Scope) error { +func Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *apps.DaemonSetSpec, out *appsv1beta2.DaemonSetSpec, s conversion.Scope) error { out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = int32(in.MinReadySeconds) @@ -495,20 +494,20 @@ func Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extensions.Da return nil } -func Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *appsv1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { +func Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *appsv1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = appsv1beta2.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = &appsv1beta2.RollingUpdateDaemonSet{} - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } return nil } -func Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(in *appsv1beta2.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { +func Convert_v1beta2_DaemonSet_To_apps_DaemonSet(in *appsv1beta2.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } if value, ok := in.Annotations[appsv1beta2.DeprecatedTemplateGeneration]; ok { @@ -526,12 +525,12 @@ func Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(in *appsv1beta2.DaemonSet return nil } -func Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *appsv1beta2.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { +func Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(in *appsv1beta2.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { out.Selector = in.Selector if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := 
Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -544,11 +543,11 @@ func Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *appsv1beta2.D return nil } -func Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *appsv1beta2.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) +func Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *appsv1beta2.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = &extensions.RollingUpdateDaemonSet{} - if err := Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = &apps.RollingUpdateDaemonSet{} + if err := Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go index cad91c57c3cf7..567551b74ab38 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go @@ -16,7 +16,6 @@ limitations under the License. // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions // +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta2 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1beta2 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go index 5fabfdccbdab8..ae5d8692fa80b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go @@ -32,7 +32,6 @@ import ( autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" core "k8s.io/kubernetes/pkg/apis/core" corev1 "k8s.io/kubernetes/pkg/apis/core/v1" - extensions "k8s.io/kubernetes/pkg/apis/extensions" ) func init() { @@ -62,193 +61,193 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSet)(nil), (*extensions.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(a.(*v1beta2.DaemonSet), b.(*extensions.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSet_To_apps_DaemonSet(a.(*v1beta2.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSet)(nil), (*v1beta2.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(a.(*extensions.DaemonSet), b.(*v1beta2.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSet)(nil), (*v1beta2.DaemonSet)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_apps_DaemonSet_To_v1beta2_DaemonSet(a.(*apps.DaemonSet), b.(*v1beta2.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetCondition)(nil), (*extensions.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(a.(*v1beta2.DaemonSetCondition), b.(*extensions.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*v1beta2.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetCondition)(nil), (*v1beta2.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(a.(*extensions.DaemonSetCondition), b.(*v1beta2.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetCondition)(nil), (*v1beta2.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(a.(*apps.DaemonSetCondition), b.(*v1beta2.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetList)(nil), (*extensions.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(a.(*v1beta2.DaemonSetList), b.(*extensions.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetList)(nil), (*apps.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetList_To_apps_DaemonSetList(a.(*v1beta2.DaemonSetList), b.(*apps.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetList)(nil), (*v1beta2.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(a.(*extensions.DaemonSetList), b.(*v1beta2.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetList)(nil), (*v1beta2.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetList_To_v1beta2_DaemonSetList(a.(*apps.DaemonSetList), b.(*v1beta2.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetSpec)(nil), (*extensions.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(a.(*v1beta2.DaemonSetSpec), b.(*extensions.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1beta2.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetSpec)(nil), (*v1beta2.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(a.(*extensions.DaemonSetSpec), 
b.(*v1beta2.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetSpec)(nil), (*v1beta2.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1beta2.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetStatus)(nil), (*extensions.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(a.(*v1beta2.DaemonSetStatus), b.(*extensions.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*v1beta2.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetStatus)(nil), (*v1beta2.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(a.(*extensions.DaemonSetStatus), b.(*v1beta2.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetStatus)(nil), (*v1beta2.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(a.(*apps.DaemonSetStatus), b.(*v1beta2.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetUpdateStrategy)(nil), (*extensions.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(a.(*v1beta2.DaemonSetUpdateStrategy), b.(*extensions.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1beta2.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetUpdateStrategy)(nil), (*v1beta2.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(a.(*extensions.DaemonSetUpdateStrategy), b.(*v1beta2.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1beta2.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1beta2.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Deployment_To_extensions_Deployment(a.(*v1beta2.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_Deployment_To_apps_Deployment(a.(*v1beta2.Deployment), 
b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.Deployment)(nil), (*v1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1beta2_Deployment(a.(*extensions.Deployment), b.(*v1beta2.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*v1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1beta2_Deployment(a.(*apps.Deployment), b.(*v1beta2.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentCondition)(nil), (*extensions.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(a.(*v1beta2.DeploymentCondition), b.(*extensions.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(a.(*v1beta2.DeploymentCondition), b.(*apps.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentCondition)(nil), (*v1beta2.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(a.(*extensions.DeploymentCondition), b.(*v1beta2.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*v1beta2.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*v1beta2.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentList)(nil), (*extensions.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentList_To_extensions_DeploymentList(a.(*v1beta2.DeploymentList), b.(*extensions.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentList_To_apps_DeploymentList(a.(*v1beta2.DeploymentList), b.(*apps.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentList)(nil), (*v1beta2.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentList_To_v1beta2_DeploymentList(a.(*extensions.DeploymentList), b.(*v1beta2.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*v1beta2.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentList_To_v1beta2_DeploymentList(a.(*apps.DeploymentList), b.(*v1beta2.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta2.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := 
s.AddGeneratedConversionFunc((*v1beta2.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta2.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta2.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta2.DeploymentSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta2.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta2.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentStatus)(nil), (*extensions.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(a.(*v1beta2.DeploymentStatus), b.(*extensions.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1beta2.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStatus)(nil), (*v1beta2.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(a.(*extensions.DeploymentStatus), b.(*v1beta2.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*v1beta2.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*v1beta2.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta2.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta2.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta2.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta2.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta2.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta2.DeploymentStrategy), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1beta2.ReplicaSet)(nil), (*extensions.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(a.(*v1beta2.ReplicaSet), b.(*extensions.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSet)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet(a.(*v1beta2.ReplicaSet), b.(*apps.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSet)(nil), (*v1beta2.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(a.(*extensions.ReplicaSet), b.(*v1beta2.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSet)(nil), (*v1beta2.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet(a.(*apps.ReplicaSet), b.(*v1beta2.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetCondition)(nil), (*extensions.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(a.(*v1beta2.ReplicaSetCondition), b.(*extensions.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetCondition)(nil), (*apps.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(a.(*v1beta2.ReplicaSetCondition), b.(*apps.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetCondition)(nil), (*v1beta2.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(a.(*extensions.ReplicaSetCondition), b.(*v1beta2.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetCondition)(nil), (*v1beta2.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(a.(*apps.ReplicaSetCondition), b.(*v1beta2.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetList)(nil), (*extensions.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(a.(*v1beta2.ReplicaSetList), b.(*extensions.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetList)(nil), (*apps.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(a.(*v1beta2.ReplicaSetList), b.(*apps.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetList)(nil), (*v1beta2.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(a.(*extensions.ReplicaSetList), b.(*v1beta2.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetList)(nil), (*v1beta2.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(a.(*apps.ReplicaSetList), b.(*v1beta2.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1beta2.ReplicaSetSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1beta2.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1beta2.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1beta2.ReplicaSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1beta2.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1beta2.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetStatus)(nil), (*extensions.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(a.(*v1beta2.ReplicaSetStatus), b.(*extensions.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(a.(*v1beta2.ReplicaSetStatus), b.(*apps.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetStatus)(nil), (*v1beta2.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(a.(*extensions.ReplicaSetStatus), b.(*v1beta2.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetStatus)(nil), (*v1beta2.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(a.(*apps.ReplicaSetStatus), b.(*v1beta2.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1beta2.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1beta2.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1beta2.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1beta2.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1beta2.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1beta2.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta2.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta2.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta2.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta2.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta2.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta2.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -352,113 +351,113 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta2.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta2.StatefulSetSpec), scope) + if err := s.AddConversionFunc((*apps.DaemonSetSpec)(nil), (*v1beta2.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1beta2.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetStatus)(nil), (*v1beta2.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*v1beta2.StatefulSetStatus), scope) + if err := s.AddConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1beta2.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1beta2.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1beta2.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1beta2.StatefulSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*apps.DaemonSet)(nil), (*v1beta2.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSet_To_v1beta2_DaemonSet(a.(*apps.DaemonSet), b.(*v1beta2.DaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) + if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta2.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta2.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSetSpec)(nil), (*v1beta2.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(a.(*extensions.DaemonSetSpec), b.(*v1beta2.DaemonSetSpec), scope) + if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta2.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta2.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSetUpdateStrategy)(nil), (*v1beta2.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(a.(*extensions.DaemonSetUpdateStrategy), b.(*v1beta2.DaemonSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*apps.Deployment)(nil), (*v1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1beta2_Deployment(a.(*apps.Deployment), b.(*v1beta2.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSet)(nil), (*v1beta2.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(a.(*extensions.DaemonSet), b.(*v1beta2.DaemonSet), scope) + if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1beta2.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1beta2.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta2.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta2.DeploymentSpec), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1beta2.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1beta2.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentStrategy)(nil), 
(*v1beta2.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta2.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta2.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta2.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.Deployment)(nil), (*v1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1beta2_Deployment(a.(*extensions.Deployment), b.(*v1beta2.Deployment), scope) + if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta2.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta2.StatefulSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1beta2.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1beta2.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*apps.StatefulSetStatus)(nil), (*v1beta2.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*v1beta2.StatefulSetStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1beta2.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1beta2.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1beta2.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1beta2.StatefulSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta2.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta2.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DaemonSetSpec)(nil), (*extensions.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(a.(*v1beta2.DaemonSetSpec), b.(*extensions.DaemonSetSpec), scope) + if err := s.AddConversionFunc((*v1beta2.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1beta2.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DaemonSetUpdateStrategy)(nil), (*extensions.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(a.(*v1beta2.DaemonSetUpdateStrategy), b.(*extensions.DaemonSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*v1beta2.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1beta2.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DaemonSet)(nil), (*extensions.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(a.(*v1beta2.DaemonSet), b.(*extensions.DaemonSet), scope) + if err := s.AddConversionFunc((*v1beta2.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSet_To_apps_DaemonSet(a.(*v1beta2.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta2.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddConversionFunc((*v1beta2.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta2.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta2.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*v1beta2.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta2.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Deployment_To_extensions_Deployment(a.(*v1beta2.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddConversionFunc((*v1beta2.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_Deployment_To_apps_Deployment(a.(*v1beta2.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1beta2.ReplicaSetSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*v1beta2.ReplicaSetSpec)(nil), 
(*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1beta2.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1beta2.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*v1beta2.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1beta2.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta2.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*v1beta2.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta2.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -555,30 +554,30 @@ func Convert_apps_ControllerRevisionList_To_v1beta2_ControllerRevisionList(in *a return autoConvert_apps_ControllerRevisionList_To_v1beta2_ControllerRevisionList(in, out, s) } -func autoConvert_v1beta2_DaemonSet_To_extensions_DaemonSet(in *v1beta2.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { +func autoConvert_v1beta2_DaemonSet_To_apps_DaemonSet(in *v1beta2.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_extensions_DaemonSet_To_v1beta2_DaemonSet(in *extensions.DaemonSet, out *v1beta2.DaemonSet, s conversion.Scope) error { +func autoConvert_apps_DaemonSet_To_v1beta2_DaemonSet(in *apps.DaemonSet, out *v1beta2.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta2.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - out.Type 
= extensions.DaemonSetConditionType(in.Type) +func autoConvert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1beta2.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + out.Type = apps.DaemonSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -586,12 +585,12 @@ func autoConvert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in return nil } -// Convert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition is an autogenerated conversion function. -func Convert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta2.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in, out, s) +// Convert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition is an autogenerated conversion function. +func Convert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1beta2.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(in, out, s) } -func autoConvert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta2.DaemonSetCondition, s conversion.Scope) error { +func autoConvert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1beta2.DaemonSetCondition, s conversion.Scope) error { out.Type = v1beta2.DaemonSetConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -600,18 +599,18 @@ func autoConvert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in return nil } -// Convert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition is an autogenerated conversion function. -func Convert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta2.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in, out, s) +// Convert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition is an autogenerated conversion function. +func Convert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1beta2.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in, out, s) } -func autoConvert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in *v1beta2.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { +func autoConvert_v1beta2_DaemonSetList_To_apps_DaemonSetList(in *v1beta2.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.DaemonSet, len(*in)) + *out = make([]apps.DaemonSet, len(*in)) for i := range *in { - if err := Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta2_DaemonSet_To_apps_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -621,18 +620,18 @@ func autoConvert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in *v1beta2.D return nil } -// Convert_v1beta2_DaemonSetList_To_extensions_DaemonSetList is an autogenerated conversion function. 
-func Convert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in *v1beta2.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - return autoConvert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in, out, s) +// Convert_v1beta2_DaemonSetList_To_apps_DaemonSetList is an autogenerated conversion function. +func Convert_v1beta2_DaemonSetList_To_apps_DaemonSetList(in *v1beta2.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { + return autoConvert_v1beta2_DaemonSetList_To_apps_DaemonSetList(in, out, s) } -func autoConvert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in *extensions.DaemonSetList, out *v1beta2.DaemonSetList, s conversion.Scope) error { +func autoConvert_apps_DaemonSetList_To_v1beta2_DaemonSetList(in *apps.DaemonSetList, out *v1beta2.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta2.DaemonSet, len(*in)) for i := range *in { - if err := Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_DaemonSet_To_v1beta2_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -642,17 +641,17 @@ func autoConvert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in *extension return nil } -// Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList is an autogenerated conversion function. -func Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in *extensions.DaemonSetList, out *v1beta2.DaemonSetList, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in, out, s) +// Convert_apps_DaemonSetList_To_v1beta2_DaemonSetList is an autogenerated conversion function. +func Convert_apps_DaemonSetList_To_v1beta2_DaemonSetList(in *apps.DaemonSetList, out *v1beta2.DaemonSetList, s conversion.Scope) error { + return autoConvert_apps_DaemonSetList_To_v1beta2_DaemonSetList(in, out, s) } -func autoConvert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta2.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(in *v1beta2.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -660,12 +659,12 @@ func autoConvert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta2.D return nil } -func autoConvert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1beta2.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *apps.DaemonSetSpec, out *v1beta2.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil 
{ + if err := Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -674,7 +673,7 @@ func autoConvert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extension return nil } -func autoConvert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1beta2.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1beta2.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -684,16 +683,16 @@ func autoConvert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1bet out.NumberAvailable = in.NumberAvailable out.NumberUnavailable = in.NumberUnavailable out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) - out.Conditions = *(*[]extensions.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus is an autogenerated conversion function. -func Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1beta2.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) +// Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus is an autogenerated conversion function. +func Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1beta2.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(in, out, s) } -func autoConvert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1beta2.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1beta2.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -707,17 +706,17 @@ func autoConvert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *exten return nil } -// Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus is an autogenerated conversion function. -func Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1beta2.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in, out, s) +// Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus is an autogenerated conversion function. 
+func Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1beta2.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in, out, s) } -func autoConvert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1beta2.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) +func autoConvert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *v1beta2.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDaemonSet) - if err := Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDaemonSet) + if err := Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -726,12 +725,12 @@ func autoConvert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateSt return nil } -func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { +func autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = v1beta2.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1beta2.RollingUpdateDaemonSet) - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -740,30 +739,30 @@ func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateSt return nil } -func autoConvert_v1beta2_Deployment_To_extensions_Deployment(in *v1beta2.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func autoConvert_v1beta2_Deployment_To_apps_Deployment(in *v1beta2.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployment, out *v1beta2.Deployment, s conversion.Scope) error { +func autoConvert_apps_Deployment_To_v1beta2_Deployment(in *apps.Deployment, out *v1beta2.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := 
Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta2.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - out.Type = extensions.DeploymentConditionType(in.Type) +func autoConvert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta2.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + out.Type = apps.DeploymentConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime @@ -772,12 +771,12 @@ func autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(i return nil } -// Convert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function. -func Convert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta2.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - return autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) +// Convert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function. +func Convert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta2.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(in, out, s) } -func autoConvert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error { +func autoConvert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error { out.Type = v1beta2.DeploymentConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime @@ -787,18 +786,18 @@ func autoConvert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(i return nil } -// Convert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition is an autogenerated conversion function. -func Convert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error { - return autoConvert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(in, out, s) +// Convert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition is an autogenerated conversion function. 
+func Convert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error { + return autoConvert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(in, out, s) } -func autoConvert_v1beta2_DeploymentList_To_extensions_DeploymentList(in *v1beta2.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { +func autoConvert_v1beta2_DeploymentList_To_apps_DeploymentList(in *v1beta2.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.Deployment, len(*in)) + *out = make([]apps.Deployment, len(*in)) for i := range *in { - if err := Convert_v1beta2_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta2_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -808,18 +807,18 @@ func autoConvert_v1beta2_DeploymentList_To_extensions_DeploymentList(in *v1beta2 return nil } -// Convert_v1beta2_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function. -func Convert_v1beta2_DeploymentList_To_extensions_DeploymentList(in *v1beta2.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - return autoConvert_v1beta2_DeploymentList_To_extensions_DeploymentList(in, out, s) +// Convert_v1beta2_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function. +func Convert_v1beta2_DeploymentList_To_apps_DeploymentList(in *v1beta2.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { + return autoConvert_v1beta2_DeploymentList_To_apps_DeploymentList(in, out, s) } -func autoConvert_extensions_DeploymentList_To_v1beta2_DeploymentList(in *extensions.DeploymentList, out *v1beta2.DeploymentList, s conversion.Scope) error { +func autoConvert_apps_DeploymentList_To_v1beta2_DeploymentList(in *apps.DeploymentList, out *v1beta2.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta2.Deployment, len(*in)) for i := range *in { - if err := Convert_extensions_Deployment_To_v1beta2_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_Deployment_To_v1beta2_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -829,12 +828,12 @@ func autoConvert_extensions_DeploymentList_To_v1beta2_DeploymentList(in *extensi return nil } -// Convert_extensions_DeploymentList_To_v1beta2_DeploymentList is an autogenerated conversion function. -func Convert_extensions_DeploymentList_To_v1beta2_DeploymentList(in *extensions.DeploymentList, out *v1beta2.DeploymentList, s conversion.Scope) error { - return autoConvert_extensions_DeploymentList_To_v1beta2_DeploymentList(in, out, s) +// Convert_apps_DeploymentList_To_v1beta2_DeploymentList is an autogenerated conversion function. 
+func Convert_apps_DeploymentList_To_v1beta2_DeploymentList(in *apps.DeploymentList, out *v1beta2.DeploymentList, s conversion.Scope) error { + return autoConvert_apps_DeploymentList_To_v1beta2_DeploymentList(in, out, s) } -func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func autoConvert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(in *v1beta2.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -842,7 +841,7 @@ func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2 if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -852,7 +851,7 @@ func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2 return nil } -func autoConvert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensions.DeploymentSpec, out *v1beta2.DeploymentSpec, s conversion.Scope) error { +func autoConvert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(in *apps.DeploymentSpec, out *v1beta2.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -860,7 +859,7 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensi if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -871,24 +870,24 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensi return nil } -func autoConvert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta2.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { +func autoConvert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta2.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas - out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) return nil } -// Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function. 
-func Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta2.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - return autoConvert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +// Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function. +func Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta2.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { + return autoConvert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(in, out, s) } -func autoConvert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta2.DeploymentStatus, s conversion.Scope) error { +func autoConvert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta2.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas @@ -900,17 +899,17 @@ func autoConvert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in *ext return nil } -// Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus is an autogenerated conversion function. -func Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta2.DeploymentStatus, s conversion.Scope) error { - return autoConvert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in, out, s) +// Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus is an autogenerated conversion function. +func Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta2.DeploymentStatus, s conversion.Scope) error { + return autoConvert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(in, out, s) } -func autoConvert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1beta2.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func autoConvert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1beta2.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil { return err } } else { @@ -919,12 +918,12 @@ func autoConvert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(in return nil } -func autoConvert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1beta2.DeploymentStrategy, s conversion.Scope) error { +func autoConvert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1beta2.DeploymentStrategy, s conversion.Scope) error { out.Type = v1beta2.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1beta2.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(*in, *out, s); err != nil 
{ return err } } else { @@ -933,40 +932,40 @@ func autoConvert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in return nil } -func autoConvert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(in *v1beta2.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { +func autoConvert_v1beta2_ReplicaSet_To_apps_ReplicaSet(in *v1beta2.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet is an autogenerated conversion function. -func Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(in *v1beta2.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - return autoConvert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(in, out, s) +// Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet is an autogenerated conversion function. +func Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet(in *v1beta2.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { + return autoConvert_v1beta2_ReplicaSet_To_apps_ReplicaSet(in, out, s) } -func autoConvert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(in *extensions.ReplicaSet, out *v1beta2.ReplicaSet, s conversion.Scope) error { +func autoConvert_apps_ReplicaSet_To_v1beta2_ReplicaSet(in *apps.ReplicaSet, out *v1beta2.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet is an autogenerated conversion function. -func Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(in *extensions.ReplicaSet, out *v1beta2.ReplicaSet, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(in, out, s) +// Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet is an autogenerated conversion function. 
+func Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet(in *apps.ReplicaSet, out *v1beta2.ReplicaSet, s conversion.Scope) error { + return autoConvert_apps_ReplicaSet_To_v1beta2_ReplicaSet(in, out, s) } -func autoConvert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - out.Type = extensions.ReplicaSetConditionType(in.Type) +func autoConvert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error { + out.Type = apps.ReplicaSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -974,12 +973,12 @@ func autoConvert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(i return nil } -// Convert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition is an autogenerated conversion function. -func Convert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s) +// Convert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition is an autogenerated conversion function. +func Convert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(in, out, s) } -func autoConvert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error { out.Type = v1beta2.ReplicaSetConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -988,18 +987,18 @@ func autoConvert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(i return nil } -// Convert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition is an autogenerated conversion function. -func Convert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in, out, s) +// Convert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition is an autogenerated conversion function. 
+func Convert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in, out, s) } -func autoConvert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta2.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { +func autoConvert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(in *v1beta2.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.ReplicaSet, len(*in)) + *out = make([]apps.ReplicaSet, len(*in)) for i := range *in { - if err := Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1009,18 +1008,18 @@ func autoConvert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta2 return nil } -// Convert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList is an autogenerated conversion function. -func Convert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta2.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { - return autoConvert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) +// Convert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList is an autogenerated conversion function. +func Convert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(in *v1beta2.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error { + return autoConvert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(in, out, s) } -func autoConvert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in *extensions.ReplicaSetList, out *v1beta2.ReplicaSetList, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(in *apps.ReplicaSetList, out *v1beta2.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta2.ReplicaSet, len(*in)) for i := range *in { - if err := Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1030,12 +1029,12 @@ func autoConvert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in *extensi return nil } -// Convert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList is an autogenerated conversion function. -func Convert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in *extensions.ReplicaSetList, out *v1beta2.ReplicaSetList, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in, out, s) +// Convert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList is an autogenerated conversion function. 
+func Convert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(in *apps.ReplicaSetList, out *v1beta2.ReplicaSetList, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(in, out, s) } -func autoConvert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta2.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func autoConvert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *v1beta2.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1047,7 +1046,7 @@ func autoConvert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta2 return nil } -func autoConvert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *v1beta2.ReplicaSetSpec, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *v1beta2.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1059,22 +1058,22 @@ func autoConvert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensi return nil } -func autoConvert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { +func autoConvert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus is an autogenerated conversion function. -func Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) +// Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus is an autogenerated conversion function. +func Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(in, out, s) } -func autoConvert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1beta2.ReplicaSetStatus, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1beta2.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas @@ -1084,28 +1083,28 @@ func autoConvert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *ext return nil } -// Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus is an autogenerated conversion function. 
-func Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1beta2.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in, out, s) +// Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus is an autogenerated conversion function. +func Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1beta2.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in, out, s) } -func autoConvert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *v1beta2.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func autoConvert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1beta2.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *v1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1beta2.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta2.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1beta2.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta2.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go index ac64c5c12e6e4..4b4e0e6634f7c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go @@ -88,6 +88,534 @@ func (in 
*ControllerRevisionList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. +func (in *DaemonSet) DeepCopy() *DaemonSet { + if in == nil { + return nil + } + out := new(DaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. +func (in *DaemonSetList) DeepCopy() *DaemonSetList { + if in == nil { + return nil + } + out := new(DaemonSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. +func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { + if in == nil { + return nil + } + out := new(DaemonSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. +func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { + if in == nil { + return nil + } + out := new(DaemonSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. +func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { + if in == nil { + return nil + } + out := new(DaemonSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. 
+func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.RollbackTo = in.RollbackTo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. +func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { + if in == nil { + return nil + } + out := new(DeploymentRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. +func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. 
+func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. +func (in *RollbackConfig) DeepCopy() *RollbackConfig { + if in == nil { + return nil + } + out := new(RollbackConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { + *out = *in + out.MaxUnavailable = in.MaxUnavailable + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. +func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { + if in == nil { + return nil + } + out := new(RollingUpdateDaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + out.MaxUnavailable = in.MaxUnavailable + out.MaxSurge = in.MaxSurge + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
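The hunks above add the usual generated deep-copy trio for these types: DeepCopyInto writes into a caller-supplied value, DeepCopy allocates and delegates, and DeepCopyObject satisfies runtime.Object. Below is a minimal, self-contained sketch of that convention; Widget and its fields are hypothetical stand-ins, not types from this diff, and the point is only that pointer and slice fields get fresh allocations so the copy shares no memory with the original.

package main

import "fmt"

type Widget struct {
	Replicas *int32
	Tags     []string
}

// DeepCopyInto copies the receiver into out, allocating new storage for
// pointer and slice fields so the copy shares nothing with the original.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in
	if in.Replicas != nil {
		in, out := &in.Replicas, &out.Replicas
		*out = new(int32)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy allocates a new Widget and delegates to DeepCopyInto, returning
// nil for a nil receiver, mirroring the generated helpers.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}

func main() {
	n := int32(3)
	orig := &Widget{Replicas: &n, Tags: []string{"a"}}
	cp := orig.DeepCopy()
	*cp.Replicas = 5
	cp.Tags[0] = "b"
	fmt.Println(*orig.Replicas, orig.Tags[0]) // 3 a: the original is untouched
}

The pointer and slice branches are what make the copy safe to mutate: each gets a fresh allocation, so editing the copy never touches an object shared through an informer cache.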
func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS index 2bdfd0ce5bc59..3b7ea1b131f25 100755 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS @@ -1,9 +1,7 @@ +# approval on api packages bubbles to api-approvers reviewers: -- liggitt -- lavalamp -- wojtek-t -- deads2k -- sttts -- mbohlool -- jianhuiz -- enj +- sig-auth-authenticators-approvers +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go index 0afbdd3a3a647..b86561616ec71 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io + package authentication // import "k8s.io/kubernetes/pkg/apis/authentication" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go index deb33cfc19837..203bf22bb3480 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go @@ -59,6 +59,12 @@ type TokenReview struct { type TokenReviewSpec struct { // Token is the opaque bearer token. Token string + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + Audiences []string } // TokenReviewStatus is the result of the token authentication request. @@ -68,6 +74,16 @@ type TokenReviewStatus struct { Authenticated bool // User is the UserInfo associated with the provided token. User UserInfo + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + Audiences []string // Error indicates that the token couldn't be checked Error string } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go index 50ec02077ff33..6c4eabafa26f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
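The new Audiences fields on TokenReviewSpec and TokenReviewStatus above describe an audience-aware handshake: the client names the audiences it will accept, and an audience-aware reviewer echoes back a compatible subset. A rough sketch of the client-side check those comments recommend is below, written against the public k8s.io/api/authentication/v1 types; the helper name and the example audience URL are ours, not part of this change.

package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
)

// audienceCompatible reports whether an authenticated TokenReview response is
// usable by a client that requested the given audiences.
func audienceCompatible(requested []string, tr *authv1.TokenReview) bool {
	if !tr.Status.Authenticated {
		return false
	}
	if len(requested) == 0 {
		// No audiences requested: the token is valid for the API server's
		// default audience.
		return true
	}
	for _, want := range requested {
		for _, got := range tr.Status.Audiences {
			if want == got {
				return true
			}
		}
	}
	// Empty status.audiences with authenticated=true means the reviewer did not
	// confirm an audience; a strict audience-aware client treats that as a miss.
	return false
}

func main() {
	tr := &authv1.TokenReview{
		Spec:   authv1.TokenReviewSpec{Token: "opaque", Audiences: []string{"https://svc.example"}},
		Status: authv1.TokenReviewStatus{Authenticated: true, Audiences: []string{"https://svc.example"}},
	}
	fmt.Println(audienceCompatible(tr.Spec.Audiences, tr)) // true
}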
// +groupName=authentication.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authentication/v1 + package v1 // import "k8s.io/kubernetes/pkg/apis/authentication/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go index afa00b1594fb7..8e95da3274b1f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go @@ -263,6 +263,7 @@ func Convert_authentication_TokenReview_To_v1_TokenReview(in *authentication.Tok func autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *v1.TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { out.Token = in.Token + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) return nil } @@ -273,6 +274,7 @@ func Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *v1.TokenRe func autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *v1.TokenReviewSpec, s conversion.Scope) error { out.Token = in.Token + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) return nil } @@ -286,6 +288,7 @@ func autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *v1 if err := Convert_v1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { return err } + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) out.Error = in.Error return nil } @@ -300,6 +303,7 @@ func autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *au if err := Convert_authentication_UserInfo_To_v1_UserInfo(&in.User, &out.User, s); err != nil { return err } + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) out.Error = in.Error return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go index 7f7a5ffa3be38..07c1f4983300e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +groupName=authentication.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authentication/v1beta1 + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/authentication/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go index 97437b18f1a48..95d65c5dab3ee 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go @@ -113,6 +113,7 @@ func Convert_authentication_TokenReview_To_v1beta1_TokenReview(in *authenticatio func autoConvert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *v1beta1.TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { out.Token = in.Token + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) return nil } @@ -123,6 +124,7 @@ func Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *v1bet func autoConvert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *v1beta1.TokenReviewSpec, s conversion.Scope) error { out.Token = in.Token + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) return nil } @@ -136,6 +138,7 @@ func autoConvert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(i if err := Convert_v1beta1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { return err } + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) out.Error = in.Error return nil } @@ -150,6 +153,7 @@ func autoConvert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(i if err := Convert_authentication_UserInfo_To_v1beta1_UserInfo(&in.User, &out.User, s); err != nil { return err } + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) out.Error = in.Error return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go index 2a4cdd07abfc6..1f485500439d7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go @@ -136,7 +136,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } @@ -162,6 +162,11 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -179,6 +184,11 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS index c1613fc2e0263..ff4a7f4bf9ad0 100755 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS @@ -1,17 +1,7 @@ +# approval on api packages bubbles to api-approvers reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- liggitt -- nikhiljindal -- erictune -- sttts -- ncdc -- dims -- mml -- mbohlool -- david-mcmahon -- jianhuiz -- enj +- sig-auth-authorizers-approvers +- sig-auth-authorizers-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go index 5cb3094aba8d1..896049861f65d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authorization.k8s.io + package authorization // import "k8s.io/kubernetes/pkg/apis/authorization" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go index 11b7605c89888..6b4259dffec64 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authorization/v1 // +groupName=authorization.k8s.io + package v1 // import "k8s.io/kubernetes/pkg/apis/authorization/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go index a958fa36550b3..3300d9ce20b07 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
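The conversion functions above move Audiences across API versions with *(*[]string)(unsafe.Pointer(&in.Audiences)) rather than an element-by-element copy. The self-contained snippet below illustrates why that is sound when the two fields have identical types and layout; ExternalSpec and InternalSpec are hypothetical stand-ins for the versioned and internal types, not types from this diff.

package main

import (
	"fmt"
	"unsafe"
)

type ExternalSpec struct{ Audiences []string }
type InternalSpec struct{ Audiences []string }

func convert(in *ExternalSpec, out *InternalSpec) {
	// Same field type in both structs, so the slice header can simply be
	// reinterpreted and shared instead of copied.
	out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
}

func main() {
	in := &ExternalSpec{Audiences: []string{"https://svc.example"}}
	var out InternalSpec
	convert(in, &out)
	fmt.Println(out.Audiences[0]) // https://svc.example
}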
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authorization/v1beta1 // +groupName=authorization.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go index eebc104305349..142cf726493fd 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go @@ -185,8 +185,10 @@ func Convert_v2beta1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *au } func Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv2beta1.PodsMetricSource, s conversion.Scope) error { - targetAverageValue := *in.Target.AverageValue - out.TargetAverageValue = targetAverageValue + if in.Target.AverageValue != nil { + targetAverageValue := *in.Target.AverageValue + out.TargetAverageValue = targetAverageValue + } out.MetricName = in.Metric.Name out.Selector = in.Metric.Selector @@ -247,8 +249,10 @@ func Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(in *au } out.MetricName = in.Metric.Name out.Selector = in.Metric.Selector - currentAverageValue := *in.Current.AverageValue - out.AverageValue = &currentAverageValue + if in.Current.AverageValue != nil { + currentAverageValue := *in.Current.AverageValue + out.AverageValue = &currentAverageValue + } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/defaults.go index e08f96b92f536..5fdf8e7122c63 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/defaults.go @@ -39,7 +39,7 @@ func SetDefaults_HorizontalPodAutoscaler(obj *autoscalingv2beta1.HorizontalPodAu { Type: autoscalingv2beta1.ResourceMetricSourceType, Resource: &autoscalingv2beta1.ResourceMetricSource{ - Name: v1.ResourceCPU, + Name: v1.ResourceCPU, TargetAverageUtilization: &utilizationDefaultVal, }, }, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS index 1d1ab36e75604..796d862bd9c37 100755 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS @@ -1,14 +1,7 @@ +# approval on api packages bubbles to api-approvers reviewers: -- thockin -- lavalamp -- smarterclayton -- deads2k -- caesarxuchao -- liggitt -- sttts -- dims -- errordeveloper -- mbohlool -- david-mcmahon -- jianhuiz -- enj +- sig-auth-certificates-approvers +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go index 65aad6491ebe4..c752aacaf57d5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go @@ -16,4 +16,5 @@ limitations under the License.
// +k8s:deepcopy-gen=package // +groupName=certificates.k8s.io + package certificates // import "k8s.io/kubernetes/pkg/apis/certificates" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go index d5f13dfff3a4b..8ba037c259264 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/certificates/v1beta1 // +groupName=certificates.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/coordination/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/coordination/doc.go index c8e6b5ce60551..8cce2eda25299 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/coordination/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/coordination/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=coordination.k8s.io + package coordination // import "k8s.io/kubernetes/pkg/apis/coordination" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/coordination/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/coordination/v1beta1/doc.go index 406dbe4b33d56..da30f7fc2be67 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/coordination/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/coordination/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/coordination/v1beta1 // +groupName=coordination.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/coordination/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go index a1e6daae4966f..bef73c0db0bf4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go @@ -82,4 +82,23 @@ const ( // // Not all cloud providers support this annotation, though AWS & GCE do. AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that + // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') + // of the last change, of some Pod or Service object, that triggered the endpoints object change. + // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints + // controller at T1, and the Endpoints object was changed at T2, the + // EndpointsLastChangeTriggerTime would be set to T0. + // + // The "endpoints change trigger" here means any Pod or Service change that resulted in the + // Endpoints object change. + // + // Given the definition of the "endpoints change trigger", please note that this annotation will + // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the + // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's + // already set). 
+ // + // This annotation will be used to compute the in-cluster network programming latency SLI, see + // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md + EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go index 486122298c225..10c33f66bd1d6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go @@ -537,28 +537,3 @@ func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool { return false } - -// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements -// labels.Selector. -func ScopedResourceSelectorRequirementsAsSelector(ssr core.ScopedResourceSelectorRequirement) (labels.Selector, error) { - selector := labels.NewSelector() - var op selection.Operator - switch ssr.Operator { - case core.ScopeSelectorOpIn: - op = selection.In - case core.ScopeSelectorOpNotIn: - op = selection.NotIn - case core.ScopeSelectorOpExists: - op = selection.Exists - case core.ScopeSelectorOpDoesNotExist: - op = selection.DoesNotExist - default: - return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) - } - r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - return selector, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index 702ffa4d432f3..251547f601bdd 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -26,19 +26,19 @@ import ( const ( // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients - NamespaceDefault string = "default" + NamespaceDefault = "default" // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces - NamespaceAll string = "" + NamespaceAll = "" // NamespaceNone is the argument for a context when there is no namespace. - NamespaceNone string = "" + NamespaceNone = "" // NamespaceSystem is the system namespace where we place system components. - NamespaceSystem string = "kube-system" + NamespaceSystem = "kube-system" // NamespacePublic is the namespace where we place public info (ConfigMaps) - NamespacePublic string = "kube-public" + NamespacePublic = "kube-public" // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) - NamespaceNodeLease string = "kube-node-lease" + NamespaceNodeLease = "kube-node-lease" // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault string = "/dev/termination-log" + TerminationMessagePathDefault = "/dev/termination-log" ) // Volume represents a named volume in a pod that may be accessed by any containers in the pod. 
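The EndpointsLastChangeTriggerTime annotation documented above is meant to let consumers measure how long endpoint propagation took. A sketch of one such consumer is below; it assumes the public core/v1 Endpoints type, parses the RFC 3339 value, and subtracts. The helper name and sample timestamp are illustrative, and the annotation key is copied from the diff.

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
)

const endpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time"

// triggerLatency returns how long ago the Pod/Service change that produced
// this Endpoints object was observed, or false if the annotation is absent
// or unparseable.
func triggerLatency(ep *corev1.Endpoints, now time.Time) (time.Duration, bool) {
	raw, ok := ep.Annotations[endpointsLastChangeTriggerTime]
	if !ok {
		return 0, false
	}
	t, err := time.Parse(time.RFC3339, raw) // RFC 3339, e.g. 2018-10-22T19:32:52.1Z
	if err != nil {
		return 0, false
	}
	return now.Sub(t), true
}

func main() {
	ep := &corev1.Endpoints{}
	ep.Annotations = map[string]string{endpointsLastChangeTriggerTime: "2018-10-22T19:32:52.1Z"}
	if d, ok := triggerLatency(ep, time.Date(2018, 10, 22, 19, 32, 53, 0, time.UTC)); ok {
		fmt.Println(d) // 900ms
	}
}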
@@ -175,7 +175,7 @@ type PersistentVolumeSource struct { HostPath *HostPathVolumeSource // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod // +optional - Glusterfs *GlusterfsVolumeSource + Glusterfs *GlusterfsPersistentVolumeSource // NFS represents an NFS mount on the host that shares a pod's lifetime // +optional NFS *NFSVolumeSource @@ -229,7 +229,7 @@ type PersistentVolumeSource struct { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource - // CSI (Container Storage Interface) represents storage that handled by an external CSI driver (Beta feature). + // CSI (Container Storage Interface) represents storage that handled by an external CSI driver. // +optional CSI *CSIPersistentVolumeSource } @@ -298,7 +298,7 @@ type PersistentVolumeSpec struct { MountOptions []string // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -410,7 +410,7 @@ type PersistentVolumeClaimSpec struct { StorageClassName *string // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode // This field requires the VolumeSnapshotDataSource alpha feature gate to be @@ -935,6 +935,30 @@ type GlusterfsVolumeSource struct { ReadOnly bool } +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsPersistentVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + EndpointsName string + + // Path is the Glusterfs volume path. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + Path string + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + EndpointsNamespace *string +} + // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { @@ -1523,7 +1547,7 @@ type LocalVolumeSource struct { FSType *string } -// Represents storage that is managed by an external CSI volume driver (Beta feature) +// Represents storage that is managed by an external CSI volume driver. type CSIPersistentVolumeSource struct { // Driver is the name of the driver to use for this volume. // Required. 
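GlusterfsPersistentVolumeSource above differs from the pod-level source only by the optional EndpointsNamespace pointer, which falls back to the bound PVC's namespace when left nil. A sketch of a PersistentVolume using it, written against the public core/v1 mirror of the type, follows; the volume, endpoints, and namespace names are illustrative.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func glusterPV() *corev1.PersistentVolume {
	epNamespace := "gluster-system" // leave nil to default to the bound PVC's namespace
	return &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "gluster-pv"},
		Spec: corev1.PersistentVolumeSpec{
			Capacity:    corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("5Gi")},
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				Glusterfs: &corev1.GlusterfsPersistentVolumeSource{
					EndpointsName:      "glusterfs-cluster",
					Path:               "kube_vol",
					EndpointsNamespace: &epNamespace,
				},
			},
		},
	}
}

func main() {
	fmt.Println(glusterPV().Spec.Glusterfs.EndpointsName)
}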
@@ -1552,7 +1576,7 @@ type CSIPersistentVolumeSource struct { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional ControllerPublishSecretRef *SecretReference @@ -1560,7 +1584,7 @@ type CSIPersistentVolumeSource struct { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodeStageSecretRef *SecretReference @@ -1568,7 +1592,7 @@ type CSIPersistentVolumeSource struct { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodePublishSecretRef *SecretReference @@ -1944,7 +1968,7 @@ type Container struct { // +optional VolumeMounts []VolumeMount // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeDevices []VolumeDevice // +optional @@ -2597,6 +2621,11 @@ type PodSpec struct { // This is an alpha feature and may change in the future. // +optional RuntimeClassName *string + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // If not specified, the default is true. 
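The new EnableServiceLinks field above is a per-pod switch for the Docker-link-style service environment variables, defaulting to true when unset. A sketch of a pod spec that opts out, using the public core/v1 field, is below; the container name and image are illustrative.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func quietPodSpec() corev1.PodSpec {
	disable := false
	return corev1.PodSpec{
		Containers: []corev1.Container{{
			Name:  "app",
			Image: "registry.example/app:latest", // illustrative image reference
		}},
		// Left nil, this defaults to true (see SetDefaults_Pod later in this diff),
		// which keeps the legacy service environment variables.
		EnableServiceLinks: &disable,
	}
}

func main() {
	fmt.Println(*quietPodSpec().EnableServiceLinks) // false
}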
+ // +optional + EnableServiceLinks *bool } // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD.bazel index 2b8724a8fd604..d62da579277d1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD.bazel @@ -24,8 +24,8 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", "//vendor/k8s.io/kubernetes/pkg/features:go_default_library", "//vendor/k8s.io/kubernetes/pkg/util/parsers:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go index bab07f1950c77..926a39789ea96 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go @@ -26,8 +26,8 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -43,12 +43,12 @@ func addConversionFuncs(scheme *runtime.Scheme) error { Convert_v1_Secret_To_core_Secret, Convert_v1_ServiceSpec_To_core_ServiceSpec, Convert_v1_ResourceList_To_core_ResourceList, - Convert_v1_ReplicationController_To_extensions_ReplicaSet, - Convert_v1_ReplicationControllerSpec_To_extensions_ReplicaSetSpec, - Convert_v1_ReplicationControllerStatus_To_extensions_ReplicaSetStatus, - Convert_extensions_ReplicaSet_To_v1_ReplicationController, - Convert_extensions_ReplicaSetSpec_To_v1_ReplicationControllerSpec, - Convert_extensions_ReplicaSetStatus_To_v1_ReplicationControllerStatus, + Convert_v1_ReplicationController_To_apps_ReplicaSet, + Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec, + Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus, + Convert_apps_ReplicaSet_To_v1_ReplicationController, + Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec, + Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus, ) if err != nil { return err @@ -120,18 +120,18 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return nil } -func Convert_v1_ReplicationController_To_extensions_ReplicaSet(in *v1.ReplicationController, out *extensions.ReplicaSet, s conversion.Scope) error { +func Convert_v1_ReplicationController_To_apps_ReplicaSet(in *v1.ReplicationController, out *apps.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ReplicationControllerSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1_ReplicationControllerStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { 
return err } return nil } -func Convert_v1_ReplicationControllerSpec_To_extensions_ReplicaSetSpec(in *v1.ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(in *v1.ReplicationControllerSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { out.Replicas = *in.Replicas out.MinReadySeconds = in.MinReadySeconds if in.Selector != nil { @@ -146,15 +146,15 @@ func Convert_v1_ReplicationControllerSpec_To_extensions_ReplicaSetSpec(in *v1.Re return nil } -func Convert_v1_ReplicationControllerStatus_To_extensions_ReplicaSetStatus(in *v1.ReplicationControllerStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { +func Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(in *v1.ReplicationControllerStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.ObservedGeneration = in.ObservedGeneration for _, cond := range in.Conditions { - out.Conditions = append(out.Conditions, extensions.ReplicaSetCondition{ - Type: extensions.ReplicaSetConditionType(cond.Type), + out.Conditions = append(out.Conditions, apps.ReplicaSetCondition{ + Type: apps.ReplicaSetConditionType(cond.Type), Status: core.ConditionStatus(cond.Status), LastTransitionTime: cond.LastTransitionTime, Reason: cond.Reason, @@ -164,9 +164,9 @@ func Convert_v1_ReplicationControllerStatus_To_extensions_ReplicaSetStatus(in *v return nil } -func Convert_extensions_ReplicaSet_To_v1_ReplicationController(in *extensions.ReplicaSet, out *v1.ReplicationController, s conversion.Scope) error { +func Convert_apps_ReplicaSet_To_v1_ReplicationController(in *apps.ReplicaSet, out *v1.ReplicationController, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ReplicaSetSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { fieldErr, ok := err.(*field.Error) if !ok { return err @@ -176,13 +176,13 @@ func Convert_extensions_ReplicaSet_To_v1_ReplicationController(in *extensions.Re } out.Annotations[v1.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String() } - if err := Convert_extensions_ReplicaSetStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_extensions_ReplicaSetSpec_To_v1_ReplicationControllerSpec(in *extensions.ReplicaSetSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { +func Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(in *apps.ReplicaSetSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { out.Replicas = new(int32) *out.Replicas = in.Replicas out.MinReadySeconds = in.MinReadySeconds @@ -197,7 +197,7 @@ func Convert_extensions_ReplicaSetSpec_To_v1_ReplicationControllerSpec(in *exten return invalidErr } -func Convert_extensions_ReplicaSetStatus_To_v1_ReplicationControllerStatus(in *extensions.ReplicaSetStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { +func Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(in *apps.ReplicaSetStatus, out *v1.ReplicationControllerStatus, s 
conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go index 1c7e2b32d6603..172d3797bf5b7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go @@ -160,8 +160,16 @@ func SetDefaults_Pod(obj *v1.Pod) { } } } + if obj.Spec.EnableServiceLinks == nil { + enableServiceLinks := v1.DefaultEnableServiceLinks + obj.Spec.EnableServiceLinks = &enableServiceLinks + } } func SetDefaults_PodSpec(obj *v1.PodSpec) { + // New fields added here will break upgrade tests: + // https://github.com/kubernetes/kubernetes/issues/69445 + // In most cases the new defaulted field can added to SetDefaults_Pod instead of here, so + // that it only materializes in the Pod object and not all objects with a PodSpec field. if obj.DNSPolicy == "" { obj.DNSPolicy = v1.DNSClusterFirst } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go index bf6c001b780a3..fa11a6b36a921 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go @@ -500,3 +500,28 @@ func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { return "" } + +// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements +// labels.Selector. +func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorRequirement) (labels.Selector, error) { + selector := labels.NewSelector() + var op selection.Operator + switch ssr.Operator { + case v1.ScopeSelectorOpIn: + op = selection.In + case v1.ScopeSelectorOpNotIn: + op = selection.NotIn + case v1.ScopeSelectorOpExists: + op = selection.Exists + case v1.ScopeSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) + } + r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + return selector, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go index 7bfbcf0f119f7..4394014255637 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go @@ -29,8 +29,8 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" + apps "k8s.io/kubernetes/pkg/apis/apps" core "k8s.io/kubernetes/pkg/apis/core" - extensions "k8s.io/kubernetes/pkg/apis/extensions" ) func init() { @@ -610,6 +610,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1.GlusterfsPersistentVolumeSource)(nil), (*core.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(a.(*v1.GlusterfsPersistentVolumeSource), b.(*core.GlusterfsPersistentVolumeSource), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*core.GlusterfsPersistentVolumeSource)(nil), (*v1.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(a.(*core.GlusterfsPersistentVolumeSource), b.(*v1.GlusterfsPersistentVolumeSource), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*v1.GlusterfsVolumeSource)(nil), (*core.GlusterfsVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(a.(*v1.GlusterfsVolumeSource), b.(*core.GlusterfsVolumeSource), scope) }); err != nil { @@ -1990,6 +2000,21 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(a.(*apps.ReplicaSetSpec), b.(*v1.ReplicationControllerSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apps.ReplicaSetStatus)(nil), (*v1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(a.(*apps.ReplicaSetStatus), b.(*v1.ReplicationControllerStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apps.ReplicaSet)(nil), (*v1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSet_To_v1_ReplicationController(a.(*apps.ReplicaSet), b.(*v1.ReplicationController), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*core.PodSecurityContext)(nil), (*v1.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_core_PodSecurityContext_To_v1_PodSecurityContext(a.(*core.PodSecurityContext), b.(*v1.PodSecurityContext), scope) }); err != nil { @@ -2020,21 +2045,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1_ReplicationControllerSpec(a.(*extensions.ReplicaSetSpec), b.(*v1.ReplicationControllerSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*extensions.ReplicaSetStatus)(nil), (*v1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetStatus_To_v1_ReplicationControllerStatus(a.(*extensions.ReplicaSetStatus), b.(*v1.ReplicationControllerStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*extensions.ReplicaSet)(nil), (*v1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSet_To_v1_ReplicationController(a.(*extensions.ReplicaSet), b.(*v1.ReplicationController), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1.PodSecurityContext)(nil), (*core.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_PodSecurityContext_To_core_PodSecurityContext(a.(*v1.PodSecurityContext), b.(*core.PodSecurityContext), scope) }); err != nil { @@ -2055,23 +2065,23 @@ func 
RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*core.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(a.(*v1.ReplicationControllerSpec), b.(*core.ReplicationControllerSpec), scope) + if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(a.(*v1.ReplicationControllerSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerSpec_To_extensions_ReplicaSetSpec(a.(*v1.ReplicationControllerSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*core.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(a.(*v1.ReplicationControllerSpec), b.(*core.ReplicationControllerSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationControllerStatus)(nil), (*extensions.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerStatus_To_extensions_ReplicaSetStatus(a.(*v1.ReplicationControllerStatus), b.(*extensions.ReplicaSetStatus), scope) + if err := s.AddConversionFunc((*v1.ReplicationControllerStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(a.(*v1.ReplicationControllerStatus), b.(*apps.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationController)(nil), (*extensions.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationController_To_extensions_ReplicaSet(a.(*v1.ReplicationController), b.(*extensions.ReplicaSet), scope) + if err := s.AddConversionFunc((*v1.ReplicationController)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationController_To_apps_ReplicaSet(a.(*v1.ReplicationController), b.(*apps.ReplicaSet), scope) }); err != nil { return err } @@ -3602,6 +3612,32 @@ func Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepo return autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) } +func autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *v1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + out.EndpointsNamespace = (*string)(unsafe.Pointer(in.EndpointsNamespace)) + return nil +} + +// Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource is an autogenerated conversion function. 
+func Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *v1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *v1.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + out.EndpointsNamespace = (*string)(unsafe.Pointer(in.EndpointsNamespace)) + return nil +} + +// Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *v1.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in, out, s) +} + func autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { out.EndpointsName = in.EndpointsName out.Path = in.Path @@ -4932,7 +4968,7 @@ func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1 out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*core.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.Glusterfs = (*core.GlusterfsPersistentVolumeSource)(unsafe.Pointer(in.Glusterfs)) out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS)) out.RBD = (*core.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) out.ISCSI = (*core.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) @@ -4963,7 +4999,7 @@ func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *co out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.Glusterfs = (*v1.GlusterfsPersistentVolumeSource)(unsafe.Pointer(in.Glusterfs)) out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) @@ -5550,6 +5586,7 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s out.DNSConfig = (*core.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) out.ReadinessGates = *(*[]core.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName)) + out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks)) return nil } @@ -5616,6 +5653,7 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s out.DNSConfig = (*v1.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) out.ReadinessGates = *(*[]v1.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.RuntimeClassName = 
(*string)(unsafe.Pointer(in.RuntimeClassName)) + out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD.bazel index a1ac2ccd6c85a..471d791647efa 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD.bazel @@ -11,7 +11,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/apis/core/validation", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -26,6 +25,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/api/service:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core/helper:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index db06578dd5449..6f0302c37fa1a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -27,7 +27,7 @@ import ( "regexp" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -903,6 +903,26 @@ func validateGlusterfsVolumeSource(glusterfs *core.GlusterfsVolumeSource, fldPat } return allErrs } +func validateGlusterfsPersistentVolumeSource(glusterfs *core.GlusterfsPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(glusterfs.EndpointsName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) + } + if len(glusterfs.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + if glusterfs.EndpointsNamespace != nil { + endpointNs := glusterfs.EndpointsNamespace + if *endpointNs == "" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("endpointsNamespace"), *endpointNs, "if the endpointnamespace is set, it must be a valid namespace name")) + } else { + for _, msg := range ValidateNamespaceName(*endpointNs, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("endpointsNamespace"), *endpointNs, msg)) + } + } + } + return allErrs +} func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -1115,10 +1135,6 @@ func validateMountPropagation(mountPropagation *core.MountPropagationMode, conta if mountPropagation == nil { return allErrs } - if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) { - allErrs = append(allErrs, field.Forbidden(fldPath, "mount propagation is disabled by feature-gate")) - return allErrs - } supportedMountPropagations := sets.NewString(string(core.MountPropagationBidirectional), string(core.MountPropagationHostToContainer), string(core.MountPropagationNone)) if !supportedMountPropagations.Has(string(*mountPropagation)) { @@ -1427,24 +1443,27 
@@ func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistent return allErrs } -func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func ValidateCSIDriverName(driverName string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { - allErrs = append(allErrs, field.Forbidden(fldPath, "CSIPersistentVolume disabled by feature-gate")) + if len(driverName) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) } - if len(csi.Driver) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) + if len(driverName) > 63 { + allErrs = append(allErrs, field.TooLong(fldPath, driverName, 63)) } - if len(csi.Driver) > 63 { - allErrs = append(allErrs, field.TooLong(fldPath.Child("driver"), csi.Driver, 63)) + if !csiDriverNameRexp.MatchString(driverName) { + allErrs = append(allErrs, field.Invalid(fldPath, driverName, validation.RegexError(csiDriverNameRexpErrMsg, csiDriverNameRexpFmt, "csi-hostpath"))) } + return allErrs +} - if !csiDriverNameRexp.MatchString(csi.Driver) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("driver"), csi.Driver, validation.RegexError(csiDriverNameRexpErrMsg, csiDriverNameRexpFmt, "csi-hostpath"))) - } +func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, ValidateCSIDriverName(csi.Driver, fldPath.Child("driver"))...) if len(csi.VolumeHandle) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), "")) @@ -1571,7 +1590,7 @@ func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList { allErrs = append(allErrs, field.Forbidden(specPath.Child("glusterfs"), "may not specify more than 1 volume type")) } else { numVolumes++ - allErrs = append(allErrs, validateGlusterfsVolumeSource(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...) + allErrs = append(allErrs, validateGlusterfsPersistentVolumeSource(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...) } } if pv.Spec.Flocker != nil { @@ -4285,7 +4304,7 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList { // We made allowed changes to oldNode, and now we compare oldNode to node. Any remaining differences indicate changes to protected fields. // TODO: Add a 'real' error type for this error and provide print actual diffs. 
if !apiequality.Semantic.DeepEqual(oldNode, node) { - glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) + klog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels, taints, or capacity (or configSource, if the DynamicKubeletConfig feature gate is enabled)")) } @@ -4912,7 +4931,7 @@ func validateScopedResourceSelectorRequirement(resourceQuotaSpec *core.ResourceQ case core.ScopeSelectorOpIn, core.ScopeSelectorOpNotIn: if len(req.Values) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("values"), - "must be atleast one value when `operator` is 'In' or 'NotIn' for scope selector")) + "must be at least one value when `operator` is 'In' or 'NotIn' for scope selector")) } case core.ScopeSelectorOpExists, core.ScopeSelectorOpDoesNotExist: if len(req.Values) != 0 { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go index 26fe6fa03219b..a4801c2e313b4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -1498,6 +1498,27 @@ func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) { + *out = *in + if in.EndpointsNamespace != nil { + in, out := &in.EndpointsNamespace, &out.EndpointsNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource. +func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { *out = *in @@ -2808,8 +2829,8 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in + *out = new(GlusterfsPersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.NFS != nil { in, out := &in.NFS, &out.NFS @@ -3556,6 +3577,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(string) **out = **in } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go index 15095ad3a292e..a2a963f7fcbeb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go @@ -15,4 +15,5 @@ limitations under the License. 
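Aside, not part of the vendored diff: the generated deep-copy code added above gives GlusterfsPersistentVolumeSource its own DeepCopy, allocating fresh storage for the EndpointsNamespace pointer (the new EnableServiceLinks pointer on PodSpec is handled the same way). A hedged sketch of what that guarantees, using only the field names visible in the validation hunk above:

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	ns := "gluster-ns"
	src := &core.GlusterfsPersistentVolumeSource{
		EndpointsName:      "glusterfs-cluster",
		Path:               "kube_vol",
		EndpointsNamespace: &ns,
	}

	// DeepCopy allocates a new *string for EndpointsNamespace (see the
	// generated DeepCopyInto above), so the copy is fully independent.
	cp := src.DeepCopy()
	*cp.EndpointsNamespace = "other-ns"

	fmt.Println(*src.EndpointsNamespace) // still "gluster-ns"
	fmt.Println(*cp.EndpointsNamespace)  // "other-ns"
}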
*/ // +groupName=events.k8s.io + package events // import "k8s.io/kubernetes/pkg/apis/events" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go index 174190914fa12..17d89c6797212 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/events/v1beta1 // +groupName=events.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/events/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD.bazel index 5c0151477b27a..60a644a241cad 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD.bazel @@ -12,11 +12,11 @@ go_library( importpath = "k8s.io/kubernetes/pkg/apis/extensions", visibility = ["//visibility:public"], deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/networking:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go index 84c2071ae664e..d4644ffaee318 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go @@ -19,6 +19,7 @@ package extensions import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/policy" @@ -49,16 +50,16 @@ var ( func addKnownTypes(scheme *runtime.Scheme) error { // TODO this gets cleaned up when the types are fixed scheme.AddKnownTypes(SchemeGroupVersion, - &Deployment{}, - &DeploymentList{}, - &DeploymentRollback{}, + &apps.Deployment{}, + &apps.DeploymentList{}, + &apps.DeploymentRollback{}, &ReplicationControllerDummy{}, - &DaemonSetList{}, - &DaemonSet{}, + &apps.DaemonSetList{}, + &apps.DaemonSet{}, &Ingress{}, &IngressList{}, - &ReplicaSet{}, - &ReplicaSetList{}, + &apps.ReplicaSet{}, + &apps.ReplicaSetList{}, &policy.PodSecurityPolicy{}, &policy.PodSecurityPolicyList{}, &autoscaling.Scale{}, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go index 8dd49b4665403..20637e74e4607 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go @@ -29,7 +29,6 @@ support is experimental. package extensions import ( - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" api "k8s.io/kubernetes/pkg/apis/core" @@ -42,458 +41,6 @@ type ReplicationControllerDummy struct { metav1.TypeMeta } -// Alpha-level support for Custom Metrics in HPA (as annotations). 
-type CustomMetricTarget struct { - // Custom Metric name. - Name string - // Custom Metric value (average). - TargetValue resource.Quantity -} - -type CustomMetricTargetList struct { - Items []CustomMetricTarget -} - -type CustomMetricCurrentStatus struct { - // Custom Metric name. - Name string - // Custom Metric value (average). - CurrentValue resource.Quantity -} - -type CustomMetricCurrentStatusList struct { - Items []CustomMetricCurrentStatus -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type Deployment struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Specification of the desired behavior of the Deployment. - // +optional - Spec DeploymentSpec - - // Most recently observed status of the Deployment. - // +optional - Status DeploymentStatus -} - -type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - // +optional - Replicas int32 - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // +optional - Selector *metav1.LabelSelector - - // Template describes the pods that will be created. - Template api.PodTemplateSpec - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - Strategy DeploymentStrategy - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // +optional - RevisionHistoryLimit *int32 - - // Indicates that the deployment is paused and will not be processed by the - // deployment controller. - // +optional - Paused bool - - // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. - // +optional - RollbackTo *RollbackConfig - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. This is set to - // the max value of int32 (i.e. 2147483647) by default, which means "no deadline". - // +optional - ProgressDeadlineSeconds *int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED. -// DeploymentRollback stores the information required to rollback a deployment. -type DeploymentRollback struct { - metav1.TypeMeta - // Required: This must match the Name of a deployment. - Name string - // The annotations to be updated to a deployment - // +optional - UpdatedAnnotations map[string]string - // The config of this deployment rollback. - RollbackTo RollbackConfig -} - -// DEPRECATED. -type RollbackConfig struct { - // The revision to rollback to. 
If set to 0, rollback to the last revision. - // +optional - Revision int64 -} - -const ( - // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added - // to existing RCs (and label key that is added to its pods) to prevent the existing RCs - // to select new pods (and old pods being select by new RC). - DefaultDeploymentUniqueLabelKey string = "pod-template-hash" -) - -type DeploymentStrategy struct { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - Type DeploymentStrategyType - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - RollingUpdate *RollingUpdateDeployment -} - -type DeploymentStrategyType string - -const ( - // Kill all existing pods before creating new ones. - RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" - - // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. - RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of rolling update. -type RollingUpdateDeployment struct { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down by 30% - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that at least 70% of original number of pods are available at all times - // during the update. - // +optional - MaxUnavailable intstr.IntOrString - - // The maximum number of pods that can be scheduled above the original number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of total pods at - // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up by 30% - // immediately when the rolling update starts. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of original pods. - // +optional - MaxSurge intstr.IntOrString -} - -type DeploymentStatus struct { - // The generation observed by the deployment controller. - // +optional - ObservedGeneration int64 - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - // +optional - Replicas int32 - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - // +optional - UpdatedReplicas int32 - - // Total number of ready pods targeted by this deployment. - // +optional - ReadyReplicas int32 - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - AvailableReplicas int32 - - // Total number of unavailable pods targeted by this deployment. 
This is the total number of - // pods that are still required for the deployment to have 100% available capacity. They may - // either be pods that are running but not yet available or pods that still have not been created. - // +optional - UnavailableReplicas int32 - - // Represents the latest available observations of a deployment's current state. - Conditions []DeploymentCondition - - // Count of hash collisions for the Deployment. The Deployment controller uses this - // field as a collision avoidance mechanism when it needs to create the name for the - // newest ReplicaSet. - // +optional - CollisionCount *int32 -} - -type DeploymentConditionType string - -// These are valid conditions of a deployment. -const ( - // Available means the deployment is available, ie. at least the minimum available - // replicas required are up and running for at least minReadySeconds. - DeploymentAvailable DeploymentConditionType = "Available" - // Progressing means the deployment is progressing. Progress for a deployment is - // considered when a new replica set is created or adopted, and when new pods scale - // up or old pods scale down. Progress is not estimated for paused deployments or - // when progressDeadlineSeconds is not specified. - DeploymentProgressing DeploymentConditionType = "Progressing" - // ReplicaFailure is added in a deployment when one of its pods fails to be created - // or deleted. - DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" -) - -// DeploymentCondition describes the state of a deployment at a certain point. -type DeploymentCondition struct { - // Type of deployment condition. - Type DeploymentConditionType - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus - // The last time this condition was updated. - LastUpdateTime metav1.Time - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - Reason string - // A human readable message indicating details about the transition. - Message string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type DeploymentList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is the list of deployments. - Items []Deployment -} - -type DaemonSetUpdateStrategy struct { - // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". - // Default is OnDelete. - // +optional - Type DaemonSetUpdateStrategyType - - // Rolling update config params. Present only if type = "RollingUpdate". - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as Deployment `strategy.rollingUpdate`. - // See https://github.com/kubernetes/kubernetes/issues/35345 - // +optional - RollingUpdate *RollingUpdateDaemonSet -} - -type DaemonSetUpdateStrategyType string - -const ( - // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. - RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" - - // Replace the old daemons only when it's killed - OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" -) - -// Spec to control the desired behavior of daemon set rolling update. -type RollingUpdateDaemonSet struct { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. 
Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // This cannot be 0. - // Default value is 1. - // Example: when this is set to 30%, at most 30% of the total number of nodes - // that should be running the daemon pod (i.e. status.desiredNumberScheduled) - // can have their pods stopped for an update at any given - // time. The update starts by stopping at most 30% of those DaemonSet pods - // and then brings up new DaemonSet pods in their place. Once the new pods - // are available, it then proceeds onto other DaemonSet pods, thus ensuring - // that at least 70% of original number of DaemonSet pods are available at - // all times during the update. - // +optional - MaxUnavailable intstr.IntOrString -} - -// DaemonSetSpec is the specification of a daemon set. -type DaemonSetSpec struct { - // A label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector - - // An object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - Template api.PodTemplateSpec - - // An update strategy to replace existing DaemonSet pods with new pods. - // +optional - UpdateStrategy DaemonSetUpdateStrategy - - // The minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). - // +optional - MinReadySeconds int32 - - // DEPRECATED. - // A sequence number representing a specific generation of the template. - // Populated by the system. It can be set only during the creation. - // +optional - TemplateGeneration int64 - - // The number of old history to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - RevisionHistoryLimit *int32 -} - -// DaemonSetStatus represents the current status of a daemon set. -type DaemonSetStatus struct { - // The number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - CurrentNumberScheduled int32 - - // The number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. - NumberMisscheduled int32 - - // The total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - DesiredNumberScheduled int32 - - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. - NumberReady int32 - - // The most recent generation observed by the daemon set controller. 
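Aside, not part of the vendored diff: the rolling-update comments above (for both Deployment and DaemonSet, whose internal types now live in the apps package) resolve percentage values against the replica count, rounding maxUnavailable down and maxSurge up. A small illustrative sketch of that arithmetic only; resolvePercent is a hypothetical helper, not a Kubernetes API:

package main

import (
	"fmt"
	"math"
)

// resolvePercent mirrors the semantics described in the comments above:
// take the percentage of the total replica count, rounding down for
// maxUnavailable and up for maxSurge.
func resolvePercent(percent, total int, roundUp bool) int {
	v := float64(percent) * float64(total) / 100.0
	if roundUp {
		return int(math.Ceil(v))
	}
	return int(math.Floor(v))
}

func main() {
	replicas := 10
	// maxUnavailable of 30%: 3 pods may be down, so at least 7 of the
	// original 10 stay available during the update.
	fmt.Println(resolvePercent(30, replicas, false)) // 3
	// maxSurge of 25%: 2.5 rounds up to 3 extra pods, i.e. at most 13
	// pods in total at any point during the update.
	fmt.Println(resolvePercent(25, replicas, true)) // 3
	// The same 25% rounds down to 2 when used as maxUnavailable.
	fmt.Println(resolvePercent(25, replicas, false)) // 2
}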
- // +optional - ObservedGeneration int64 - - // The total number of nodes that are running updated daemon pod - // +optional - UpdatedNumberScheduled int32 - - // The number of nodes that should be running the - // daemon pod and have one or more of the daemon pod running and - // available (ready for at least spec.minReadySeconds) - // +optional - NumberAvailable int32 - - // The number of nodes that should be running the - // daemon pod and have none of the daemon pod running and available - // (ready for at least spec.minReadySeconds) - // +optional - NumberUnavailable int32 - - // Count of hash collisions for the DaemonSet. The DaemonSet controller - // uses this field as a collision avoidance mechanism when it needs to - // create the name for the newest ControllerRevision. - // +optional - CollisionCount *int32 - - // Represents the latest available observations of a DaemonSet's current state. - Conditions []DaemonSetCondition -} - -type DaemonSetConditionType string - -// TODO: Add valid condition types of a DaemonSet. - -// DaemonSetCondition describes the state of a DaemonSet at a certain point. -type DaemonSetCondition struct { - // Type of DaemonSet condition. - Type DaemonSetConditionType - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - Reason string - // A human readable message indicating details about the transition. - Message string -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DaemonSet represents the configuration of a daemon set. -type DaemonSet struct { - metav1.TypeMeta - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec DaemonSetSpec - - // The current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status DaemonSetStatus -} - -const ( - // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. - // DaemonSetTemplateGenerationKey is the key of the labels that is added - // to daemon set pods to distinguish between old and new pod templates - // during DaemonSet template update. - DaemonSetTemplateGenerationKey string = "pod-template-generation" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DaemonSetList is a collection of daemon sets. -type DaemonSetList struct { - metav1.TypeMeta - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - - // A list of daemon sets. - Items []DaemonSet -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -662,114 +209,3 @@ type IngressBackend struct { // Specifies the port of the referenced service. 
ServicePort intstr.IntOrString } - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicaSet ensures that a specified number of pod replicas are running at any given time. -type ReplicaSet struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired behavior of this ReplicaSet. - // +optional - Spec ReplicaSetSpec - - // Status is the current status of this ReplicaSet. This data may be - // out of date by some window of time. - // +optional - Status ReplicaSetStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicaSetList is a collection of ReplicaSets. -type ReplicaSetList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ReplicaSet -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -// As the internal representation of a ReplicaSet, it must have -// a Template set. -type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. - Replicas int32 - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // Selector is a label query over pods that should match the replica count. - // Must match in order to be controlled. - // If empty, defaulted to labels on pod template. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // +optional - Template api.PodTemplateSpec -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -type ReplicaSetStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - // +optional - FullyLabeledReplicas int32 - - // The number of ready replicas for this replica set. - // +optional - ReadyReplicas int32 - - // The number of available replicas (ready for at least minReadySeconds) for this replica set. - // +optional - AvailableReplicas int32 - - // ObservedGeneration is the most recent generation observed by the controller. - // +optional - ObservedGeneration int64 - - // Represents the latest available observations of a replica set's current state. - // +optional - Conditions []ReplicaSetCondition -} - -type ReplicaSetConditionType string - -// These are valid conditions of a replica set. -const ( - // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created - // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted - // due to kubelet being down or finalizers are failing. - ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" -) - -// ReplicaSetCondition describes the state of a replica set at a certain point. -type ReplicaSetCondition struct { - // Type of replica set condition. 
- Type ReplicaSetConditionType - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - // +optional - Reason string - // A human readable message indicating details about the transition. - // +optional - Message string -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD.bazel index 23a92291294bb..8bc8af7ef9258 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD.bazel @@ -22,6 +22,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go index ce7b78ae20bf7..80a83b9b289a5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go @@ -27,10 +27,10 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/networking" ) @@ -39,16 +39,16 @@ func addConversionFuncs(scheme *runtime.Scheme) error { err := scheme.AddConversionFuncs( Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus, Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus, - Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, - Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, - Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, - Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, - Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec, + Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy, + Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, + Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, + Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet, + Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, + 
Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec, Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy, Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy, Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule, @@ -106,13 +106,13 @@ func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *extensionsv1beta return nil } -func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *extensionsv1beta1.DeploymentSpec, s conversion.Scope) error { +func Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *extensionsv1beta1.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -134,7 +134,7 @@ func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions. return nil } -func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *extensionsv1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *extensionsv1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -142,14 +142,14 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *extensionsv if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.RevisionHistoryLimit = in.RevisionHistoryLimit out.MinReadySeconds = in.MinReadySeconds out.Paused = in.Paused if in.RollbackTo != nil { - out.RollbackTo = new(extensions.RollbackConfig) + out.RollbackTo = new(apps.RollbackConfig) out.RollbackTo.Revision = in.RollbackTo.Revision } else { out.RollbackTo = nil @@ -161,11 +161,11 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *extensionsv return nil } -func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *extensionsv1beta1.DeploymentStrategy, s conversion.Scope) error { +func Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *extensionsv1beta1.DeploymentStrategy, s conversion.Scope) error { out.Type = extensionsv1beta1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(extensionsv1beta1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -174,11 +174,11 @@ func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *ext return nil } -func 
Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *extensionsv1beta1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *extensionsv1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -187,7 +187,7 @@ func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *ext return nil } -func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *extensionsv1beta1.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *extensionsv1beta1.RollingUpdateDeployment, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -203,7 +203,7 @@ func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployme return nil } -func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *extensionsv1beta1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *extensionsv1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -213,7 +213,7 @@ func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployme return nil } -func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *extensionsv1beta1.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *extensionsv1beta1.RollingUpdateDaemonSet, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -223,14 +223,14 @@ func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet return nil } -func Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *extensionsv1beta1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *extensionsv1beta1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } return nil } -func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *extensionsv1beta1.ReplicaSetSpec, s conversion.Scope) error { +func Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *extensionsv1beta1.ReplicaSetSpec, s conversion.Scope) error { 
out.Replicas = new(int32) *out.Replicas = int32(in.Replicas) out.MinReadySeconds = in.MinReadySeconds @@ -241,7 +241,7 @@ func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions. return nil } -func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *extensionsv1beta1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *extensionsv1beta1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go index 3138696a518e8..4bb885567b7e2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go @@ -118,6 +118,12 @@ func SetDefaults_Deployment(obj *extensionsv1beta1.Deployment) { obj.Spec.ProgressDeadlineSeconds = new(int32) *obj.Spec.ProgressDeadlineSeconds = math.MaxInt32 } + // Set extensionsv1beta1.DeploymentSpec.RevisionHistoryLimit to MaxInt32, + // which has the same meaning as unset. + if obj.Spec.RevisionHistoryLimit == nil { + obj.Spec.RevisionHistoryLimit = new(int32) + *obj.Spec.RevisionHistoryLimit = math.MaxInt32 + } } func SetDefaults_ReplicaSet(obj *extensionsv1beta1.ReplicaSet) { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go index 96f579d1ebab7..463bceb1a9a8c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
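Aside, not part of the vendored diff: with the defaulting change above, an unset spec.revisionHistoryLimit is pinned to MaxInt32, which the added comment treats as equivalent to "unset". A hedged usage sketch, assuming extensionsv1beta1 refers to the k8s.io/api/extensions/v1beta1 types as in the surrounding file:

package main

import (
	"fmt"
	"math"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	v1beta1defaults "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
	d := &extensionsv1beta1.Deployment{}

	// Apply the package defaults shown in defaults.go above.
	v1beta1defaults.SetDefaults_Deployment(d)

	// RevisionHistoryLimit was nil; the defaulter now sets it to MaxInt32,
	// which carries the same meaning as leaving it unset.
	fmt.Println(*d.Spec.RevisionHistoryLimit == math.MaxInt32) // true
}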
*/ +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/policy // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go index 1b35486b828d8..3159bdc69e9a2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -28,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" + apps "k8s.io/kubernetes/pkg/apis/apps" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" core "k8s.io/kubernetes/pkg/apis/core" corev1 "k8s.io/kubernetes/pkg/apis/core/v1" @@ -63,173 +64,133 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.CustomMetricCurrentStatus)(nil), (*extensions.CustomMetricCurrentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(a.(*v1beta1.CustomMetricCurrentStatus), b.(*extensions.CustomMetricCurrentStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSet_To_apps_DaemonSet(a.(*v1beta1.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.CustomMetricCurrentStatus)(nil), (*v1beta1.CustomMetricCurrentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(a.(*extensions.CustomMetricCurrentStatus), b.(*v1beta1.CustomMetricCurrentStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSet)(nil), (*v1beta1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSet_To_v1beta1_DaemonSet(a.(*apps.DaemonSet), b.(*v1beta1.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.CustomMetricCurrentStatusList)(nil), (*extensions.CustomMetricCurrentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(a.(*v1beta1.CustomMetricCurrentStatusList), b.(*extensions.CustomMetricCurrentStatusList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*v1beta1.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.CustomMetricCurrentStatusList)(nil), (*v1beta1.CustomMetricCurrentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(a.(*extensions.CustomMetricCurrentStatusList), b.(*v1beta1.CustomMetricCurrentStatusList), scope) + if err := 
s.AddGeneratedConversionFunc((*apps.DaemonSetCondition)(nil), (*v1beta1.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(a.(*apps.DaemonSetCondition), b.(*v1beta1.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.CustomMetricTarget)(nil), (*extensions.CustomMetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(a.(*v1beta1.CustomMetricTarget), b.(*extensions.CustomMetricTarget), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetList)(nil), (*apps.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSetList_To_apps_DaemonSetList(a.(*v1beta1.DaemonSetList), b.(*apps.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.CustomMetricTarget)(nil), (*v1beta1.CustomMetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(a.(*extensions.CustomMetricTarget), b.(*v1beta1.CustomMetricTarget), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetList)(nil), (*v1beta1.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetList_To_v1beta1_DaemonSetList(a.(*apps.DaemonSetList), b.(*v1beta1.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.CustomMetricTargetList)(nil), (*extensions.CustomMetricTargetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(a.(*v1beta1.CustomMetricTargetList), b.(*extensions.CustomMetricTargetList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1beta1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.CustomMetricTargetList)(nil), (*v1beta1.CustomMetricTargetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(a.(*extensions.CustomMetricTargetList), b.(*v1beta1.CustomMetricTargetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetSpec)(nil), (*v1beta1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1beta1.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSet)(nil), (*extensions.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(a.(*v1beta1.DaemonSet), b.(*extensions.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*v1beta1.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSet)(nil), 
(*v1beta1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(a.(*extensions.DaemonSet), b.(*v1beta1.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetStatus)(nil), (*v1beta1.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(a.(*apps.DaemonSetStatus), b.(*v1beta1.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetCondition)(nil), (*extensions.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(a.(*v1beta1.DaemonSetCondition), b.(*extensions.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1beta1.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetCondition)(nil), (*v1beta1.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(a.(*extensions.DaemonSetCondition), b.(*v1beta1.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1beta1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1beta1.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetList)(nil), (*extensions.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(a.(*v1beta1.DaemonSetList), b.(*extensions.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Deployment_To_apps_Deployment(a.(*v1beta1.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetList)(nil), (*v1beta1.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(a.(*extensions.DaemonSetList), b.(*v1beta1.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*v1beta1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1beta1_Deployment(a.(*apps.Deployment), b.(*v1beta1.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetSpec)(nil), (*extensions.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(a.(*v1beta1.DaemonSetSpec), b.(*extensions.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(a.(*v1beta1.DeploymentCondition), b.(*apps.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetSpec)(nil), (*v1beta1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(a.(*extensions.DaemonSetSpec), b.(*v1beta1.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*v1beta1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*v1beta1.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetStatus)(nil), (*extensions.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(a.(*v1beta1.DaemonSetStatus), b.(*extensions.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentList_To_apps_DeploymentList(a.(*v1beta1.DeploymentList), b.(*apps.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetStatus)(nil), (*v1beta1.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(a.(*extensions.DaemonSetStatus), b.(*v1beta1.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*v1beta1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentList_To_v1beta1_DeploymentList(a.(*apps.DeploymentList), b.(*v1beta1.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetUpdateStrategy)(nil), (*extensions.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(a.(*v1beta1.DaemonSetUpdateStrategy), b.(*extensions.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentRollback)(nil), (*apps.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(a.(*v1beta1.DeploymentRollback), b.(*apps.DeploymentRollback), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetUpdateStrategy)(nil), (*v1beta1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(a.(*extensions.DaemonSetUpdateStrategy), b.(*v1beta1.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentRollback)(nil), (*v1beta1.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(a.(*apps.DeploymentRollback), b.(*v1beta1.DeploymentRollback), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1beta1_Deployment_To_extensions_Deployment(a.(*v1beta1.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.Deployment)(nil), (*v1beta1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1beta1_Deployment(a.(*extensions.Deployment), b.(*v1beta1.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentCondition)(nil), (*extensions.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(a.(*v1beta1.DeploymentCondition), b.(*extensions.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1beta1.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentCondition)(nil), (*v1beta1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(a.(*extensions.DeploymentCondition), b.(*v1beta1.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*v1beta1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*v1beta1.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentList)(nil), (*extensions.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(a.(*v1beta1.DeploymentList), b.(*extensions.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentList)(nil), (*v1beta1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(a.(*extensions.DeploymentList), b.(*v1beta1.DeploymentList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentRollback)(nil), (*extensions.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(a.(*v1beta1.DeploymentRollback), b.(*extensions.DeploymentRollback), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentRollback)(nil), (*v1beta1.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(a.(*extensions.DeploymentRollback), b.(*v1beta1.DeploymentRollback), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStatus)(nil), (*extensions.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(a.(*v1beta1.DeploymentStatus), b.(*extensions.DeploymentStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStatus)(nil), (*v1beta1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(a.(*extensions.DeploymentStatus), b.(*v1beta1.DeploymentStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) }); err != nil { return err } @@ -393,53 +354,53 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSet)(nil), (*extensions.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(a.(*v1beta1.ReplicaSet), b.(*extensions.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSet)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet(a.(*v1beta1.ReplicaSet), 
b.(*apps.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSet)(nil), (*v1beta1.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(a.(*extensions.ReplicaSet), b.(*v1beta1.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSet)(nil), (*v1beta1.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet(a.(*apps.ReplicaSet), b.(*v1beta1.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetCondition)(nil), (*extensions.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(a.(*v1beta1.ReplicaSetCondition), b.(*extensions.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetCondition)(nil), (*apps.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(a.(*v1beta1.ReplicaSetCondition), b.(*apps.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetCondition)(nil), (*v1beta1.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(a.(*extensions.ReplicaSetCondition), b.(*v1beta1.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetCondition)(nil), (*v1beta1.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(a.(*apps.ReplicaSetCondition), b.(*v1beta1.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetList)(nil), (*extensions.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(a.(*v1beta1.ReplicaSetList), b.(*extensions.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetList)(nil), (*apps.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(a.(*v1beta1.ReplicaSetList), b.(*apps.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetList)(nil), (*v1beta1.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(a.(*extensions.ReplicaSetList), b.(*v1beta1.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetList)(nil), (*v1beta1.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(a.(*apps.ReplicaSetList), b.(*v1beta1.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1beta1.ReplicaSetSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := 
s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1beta1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1beta1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1beta1.ReplicaSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1beta1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1beta1.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetStatus)(nil), (*extensions.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(a.(*v1beta1.ReplicaSetStatus), b.(*extensions.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(a.(*v1beta1.ReplicaSetStatus), b.(*apps.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetStatus)(nil), (*v1beta1.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(a.(*extensions.ReplicaSetStatus), b.(*v1beta1.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetStatus)(nil), (*v1beta1.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(a.(*apps.ReplicaSetStatus), b.(*v1beta1.ReplicaSetStatus), scope) }); err != nil { return err } @@ -453,33 +414,43 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollbackConfig)(nil), (*extensions.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(a.(*v1beta1.RollbackConfig), b.(*extensions.RollbackConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.RollbackConfig)(nil), (*apps.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(a.(*v1beta1.RollbackConfig), b.(*apps.RollbackConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apps.RollbackConfig)(nil), (*v1beta1.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(a.(*apps.RollbackConfig), b.(*v1beta1.RollbackConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollbackConfig)(nil), (*v1beta1.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(a.(*extensions.RollbackConfig), b.(*v1beta1.RollbackConfig), scope) + if err := 
s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1beta1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1beta1.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1beta1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1beta1.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1beta1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1beta1.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.RunAsGroupStrategyOptions)(nil), (*policy.RunAsGroupStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(a.(*v1beta1.RunAsGroupStrategyOptions), b.(*policy.RunAsGroupStrategyOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*policy.RunAsGroupStrategyOptions)(nil), (*v1beta1.RunAsGroupStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(a.(*policy.RunAsGroupStrategyOptions), 
b.(*v1beta1.RunAsGroupStrategyOptions), scope) }); err != nil { return err } @@ -543,33 +514,33 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) + if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) + if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1beta1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1beta1.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1beta1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1beta1.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1beta1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1beta1.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1beta1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1beta1.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) }); err != nil { return err } @@ -613,13 +584,13 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddConversionFunc((*v1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } @@ -663,18 +634,18 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1beta1.ReplicaSetSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*v1beta1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1beta1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1beta1.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*v1beta1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1beta1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := 
s.AddConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -728,124 +699,40 @@ func Convert_policy_AllowedHostPath_To_v1beta1_AllowedHostPath(in *policy.Allowe return autoConvert_policy_AllowedHostPath_To_v1beta1_AllowedHostPath(in, out, s) } -func autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *v1beta1.CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { - out.Name = in.Name - out.CurrentValue = in.CurrentValue - return nil -} - -// Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus is an autogenerated conversion function. -func Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *v1beta1.CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in, out, s) -} - -func autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *v1beta1.CustomMetricCurrentStatus, s conversion.Scope) error { - out.Name = in.Name - out.CurrentValue = in.CurrentValue - return nil -} - -// Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus is an autogenerated conversion function. -func Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *v1beta1.CustomMetricCurrentStatus, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *v1beta1.CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { - out.Items = *(*[]extensions.CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList is an autogenerated conversion function. -func Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *v1beta1.CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in, out, s) -} - -func autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *v1beta1.CustomMetricCurrentStatusList, s conversion.Scope) error { - out.Items = *(*[]v1beta1.CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList is an autogenerated conversion function. 
-func Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *v1beta1.CustomMetricCurrentStatusList, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *v1beta1.CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { - out.Name = in.Name - out.TargetValue = in.TargetValue - return nil -} - -// Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget is an autogenerated conversion function. -func Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *v1beta1.CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in, out, s) -} - -func autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *v1beta1.CustomMetricTarget, s conversion.Scope) error { - out.Name = in.Name - out.TargetValue = in.TargetValue - return nil -} - -// Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget is an autogenerated conversion function. -func Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *v1beta1.CustomMetricTarget, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *v1beta1.CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { - out.Items = *(*[]extensions.CustomMetricTarget)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList is an autogenerated conversion function. -func Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *v1beta1.CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in, out, s) -} - -func autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *v1beta1.CustomMetricTargetList, s conversion.Scope) error { - out.Items = *(*[]v1beta1.CustomMetricTarget)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList is an autogenerated conversion function. 
-func Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *v1beta1.CustomMetricTargetList, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in, out, s) -} - -func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *v1beta1.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { +func autoConvert_v1beta1_DaemonSet_To_apps_DaemonSet(in *v1beta1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_DaemonSet_To_extensions_DaemonSet is an autogenerated conversion function. -func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *v1beta1.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in, out, s) +// Convert_v1beta1_DaemonSet_To_apps_DaemonSet is an autogenerated conversion function. +func Convert_v1beta1_DaemonSet_To_apps_DaemonSet(in *v1beta1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSet_To_apps_DaemonSet(in, out, s) } -func autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *v1beta1.DaemonSet, s conversion.Scope) error { +func autoConvert_apps_DaemonSet_To_v1beta1_DaemonSet(in *apps.DaemonSet, out *v1beta1.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_DaemonSet_To_v1beta1_DaemonSet is an autogenerated conversion function. -func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *v1beta1.DaemonSet, s conversion.Scope) error { - return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s) +// Convert_apps_DaemonSet_To_v1beta1_DaemonSet is an autogenerated conversion function. 
+func Convert_apps_DaemonSet_To_v1beta1_DaemonSet(in *apps.DaemonSet, out *v1beta1.DaemonSet, s conversion.Scope) error { + return autoConvert_apps_DaemonSet_To_v1beta1_DaemonSet(in, out, s) } -func autoConvert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - out.Type = extensions.DaemonSetConditionType(in.Type) +func autoConvert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1beta1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + out.Type = apps.DaemonSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -853,12 +740,12 @@ func autoConvert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in return nil } -// Convert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition is an autogenerated conversion function. -func Convert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in, out, s) +// Convert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1beta1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(in, out, s) } -func autoConvert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta1.DaemonSetCondition, s conversion.Scope) error { +func autoConvert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1beta1.DaemonSetCondition, s conversion.Scope) error { out.Type = v1beta1.DaemonSetConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -867,18 +754,18 @@ func autoConvert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in return nil } -// Convert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition is an autogenerated conversion function. -func Convert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta1.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in, out, s) +// Convert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition is an autogenerated conversion function. 
+func Convert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1beta1.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in, out, s) } -func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *v1beta1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { +func autoConvert_v1beta1_DaemonSetList_To_apps_DaemonSetList(in *v1beta1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.DaemonSet, len(*in)) + *out = make([]apps.DaemonSet, len(*in)) for i := range *in { - if err := Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta1_DaemonSet_To_apps_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -888,18 +775,18 @@ func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *v1beta1.D return nil } -// Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList is an autogenerated conversion function. -func Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *v1beta1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) +// Convert_v1beta1_DaemonSetList_To_apps_DaemonSetList is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetList_To_apps_DaemonSetList(in *v1beta1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetList_To_apps_DaemonSetList(in, out, s) } -func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *v1beta1.DaemonSetList, s conversion.Scope) error { +func autoConvert_apps_DaemonSetList_To_v1beta1_DaemonSetList(in *apps.DaemonSetList, out *v1beta1.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta1.DaemonSet, len(*in)) for i := range *in { - if err := Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -909,17 +796,17 @@ func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extension return nil } -// Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList is an autogenerated conversion function. -func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *v1beta1.DaemonSetList, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s) +// Convert_apps_DaemonSetList_To_v1beta1_DaemonSetList is an autogenerated conversion function. 
+func Convert_apps_DaemonSetList_To_v1beta1_DaemonSetList(in *apps.DaemonSetList, out *v1beta1.DaemonSetList, s conversion.Scope) error { + return autoConvert_apps_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s) } -func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(in *v1beta1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -928,17 +815,17 @@ func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta1.D return nil } -// Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec is an autogenerated conversion function. -func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s) +// Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(in *v1beta1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(in, out, s) } -func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1beta1.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *apps.DaemonSetSpec, out *v1beta1.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -947,12 +834,12 @@ func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extension return nil } -// Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec is an autogenerated conversion function. -func Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1beta1.DaemonSetSpec, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s) +// Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec is an autogenerated conversion function. 
+func Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *apps.DaemonSetSpec, out *v1beta1.DaemonSetSpec, s conversion.Scope) error { + return autoConvert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s) } -func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1beta1.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1beta1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -962,16 +849,16 @@ func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1bet out.NumberAvailable = in.NumberAvailable out.NumberUnavailable = in.NumberUnavailable out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) - out.Conditions = *(*[]extensions.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus is an autogenerated conversion function. -func Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1beta1.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) +// Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1beta1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(in, out, s) } -func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1beta1.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1beta1.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -985,17 +872,17 @@ func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *exten return nil } -// Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus is an autogenerated conversion function. -func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1beta1.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s) +// Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus is an autogenerated conversion function. 
+func Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1beta1.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s) } -func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1beta1.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) +func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *v1beta1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDaemonSet) - if err := Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDaemonSet) + if err := Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -1004,17 +891,17 @@ func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateSt return nil } -// Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy is an autogenerated conversion function. -func Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1beta1.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in, out, s) +// Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *v1beta1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in, out, s) } -func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1beta1.DaemonSetUpdateStrategy, s conversion.Scope) error { +func autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1beta1.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = v1beta1.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1beta1.RollingUpdateDaemonSet) - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -1023,45 +910,45 @@ func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateSt return nil } -// Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy is an autogenerated conversion function. -func Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1beta1.DaemonSetUpdateStrategy, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in, out, s) +// Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy is an autogenerated conversion function. 
+func Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1beta1.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in, out, s) } -func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *v1beta1.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func autoConvert_v1beta1_Deployment_To_apps_Deployment(in *v1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_Deployment_To_extensions_Deployment is an autogenerated conversion function. -func Convert_v1beta1_Deployment_To_extensions_Deployment(in *v1beta1.Deployment, out *extensions.Deployment, s conversion.Scope) error { - return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) +// Convert_v1beta1_Deployment_To_apps_Deployment is an autogenerated conversion function. +func Convert_v1beta1_Deployment_To_apps_Deployment(in *v1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error { + return autoConvert_v1beta1_Deployment_To_apps_Deployment(in, out, s) } -func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { +func autoConvert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_Deployment_To_v1beta1_Deployment is an autogenerated conversion function. -func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { - return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) +// Convert_apps_Deployment_To_v1beta1_Deployment is an autogenerated conversion function. 
+func Convert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { + return autoConvert_apps_Deployment_To_v1beta1_Deployment(in, out, s) } -func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - out.Type = extensions.DeploymentConditionType(in.Type) +func autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + out.Type = apps.DeploymentConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime @@ -1070,12 +957,12 @@ func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(i return nil } -// Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function. -func Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) +// Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function. +func Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in, out, s) } -func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { +func autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { out.Type = v1beta1.DeploymentConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime @@ -1085,18 +972,18 @@ func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(i return nil } -// Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function. -func Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { - return autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s) +// Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function. 
+func Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { + return autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s) } -func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in *v1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.Deployment, len(*in)) + *out = make([]apps.Deployment, len(*in)) for i := range *in { - if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta1_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1106,18 +993,18 @@ func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1 return nil } -// Convert_v1beta1_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function. -func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s) +// Convert_v1beta1_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function. +func Convert_v1beta1_DeploymentList_To_apps_DeploymentList(in *v1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in, out, s) } -func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { +func autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta1.Deployment, len(*in)) for i := range *in { - if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1127,40 +1014,40 @@ func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensi return nil } -// Convert_extensions_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function. -func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { - return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s) +// Convert_apps_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function. 
+func Convert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { + return autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in, out, s) } -func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta1.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *v1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error { out.Name = in.Name out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) - if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { + if err := Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { return err } return nil } -// Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback is an autogenerated conversion function. -func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta1.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s) +// Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback is an autogenerated conversion function. +func Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *v1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in, out, s) } -func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { +func autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { out.Name = in.Name out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) - if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { + if err := Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { return err } return nil } -// Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function. -func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { - return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) +// Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function. 
+func Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { + return autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) } -func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *v1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1168,18 +1055,18 @@ func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1 if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) out.Paused = in.Paused - out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo)) + out.RollbackTo = (*apps.RollbackConfig)(unsafe.Pointer(in.RollbackTo)) out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) return nil } -func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error { +func autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1187,7 +1074,7 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensi if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -1198,24 +1085,24 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensi return nil } -func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas - out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) return nil } -// Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function. 
-func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +// Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function. +func Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in, out, s) } -func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { +func autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas @@ -1227,17 +1114,17 @@ func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *ext return nil } -// Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function. -func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { - return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) +// Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function. +func Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { + return autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) } -func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1beta1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func autoConvert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil { return err } } else { @@ -1246,12 +1133,12 @@ func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in return nil } -func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error { +func autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error { out.Type = v1beta1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1beta1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != 
nil { return err } } else { @@ -1650,6 +1537,7 @@ func autoConvert_v1beta1_PodSecurityPolicySpec_To_policy_PodSecurityPolicySpec(i if err := Convert_v1beta1_RunAsUserStrategyOptions_To_policy_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { return err } + out.RunAsGroup = (*policy.RunAsGroupStrategyOptions)(unsafe.Pointer(in.RunAsGroup)) if err := Convert_v1beta1_SupplementalGroupsStrategyOptions_To_policy_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { return err } @@ -1690,6 +1578,7 @@ func autoConvert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(i if err := Convert_policy_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { return err } + out.RunAsGroup = (*v1beta1.RunAsGroupStrategyOptions)(unsafe.Pointer(in.RunAsGroup)) if err := Convert_policy_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { return err } @@ -1714,40 +1603,40 @@ func Convert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *p return autoConvert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s) } -func autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *v1beta1.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { +func autoConvert_v1beta1_ReplicaSet_To_apps_ReplicaSet(in *v1beta1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet is an autogenerated conversion function. -func Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *v1beta1.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, out, s) +// Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet is an autogenerated conversion function. +func Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet(in *v1beta1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSet_To_apps_ReplicaSet(in, out, s) } -func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *v1beta1.ReplicaSet, s conversion.Scope) error { +func autoConvert_apps_ReplicaSet_To_v1beta1_ReplicaSet(in *apps.ReplicaSet, out *v1beta1.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet is an autogenerated conversion function. 
-func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *v1beta1.ReplicaSet, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s) +// Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet is an autogenerated conversion function. +func Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet(in *apps.ReplicaSet, out *v1beta1.ReplicaSet, s conversion.Scope) error { + return autoConvert_apps_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s) } -func autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - out.Type = extensions.ReplicaSetConditionType(in.Type) +func autoConvert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1beta1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error { + out.Type = apps.ReplicaSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -1755,12 +1644,12 @@ func autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(i return nil } -// Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition is an autogenerated conversion function. -func Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s) +// Convert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition is an autogenerated conversion function. +func Convert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1beta1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in, out, s) } -func autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta1.ReplicaSetCondition, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1beta1.ReplicaSetCondition, s conversion.Scope) error { out.Type = v1beta1.ReplicaSetConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -1769,18 +1658,18 @@ func autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(i return nil } -// Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition is an autogenerated conversion function. -func Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta1.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in, out, s) +// Convert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition is an autogenerated conversion function. 
+func Convert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1beta1.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in, out, s) } -func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta1.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { +func autoConvert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(in *v1beta1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.ReplicaSet, len(*in)) + *out = make([]apps.ReplicaSet, len(*in)) for i := range *in { - if err := Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1790,18 +1679,18 @@ func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta1 return nil } -// Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList is an autogenerated conversion function. -func Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta1.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) +// Convert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList is an autogenerated conversion function. +func Convert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(in *v1beta1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(in, out, s) } -func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *v1beta1.ReplicaSetList, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(in *apps.ReplicaSetList, out *v1beta1.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta1.ReplicaSet, len(*in)) for i := range *in { - if err := Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1811,12 +1700,12 @@ func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensi return nil } -// Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList is an autogenerated conversion function. -func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *v1beta1.ReplicaSetList, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s) +// Convert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList is an autogenerated conversion function. 
+func Convert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(in *apps.ReplicaSetList, out *v1beta1.ReplicaSetList, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s) } -func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func autoConvert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *v1beta1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1828,7 +1717,7 @@ func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta1 return nil } -func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *v1beta1.ReplicaSetSpec, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *v1beta1.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1840,22 +1729,22 @@ func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensi return nil } -func autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1beta1.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { +func autoConvert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1beta1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus is an autogenerated conversion function. -func Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1beta1.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) +// Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus is an autogenerated conversion function. +func Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1beta1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in, out, s) } -func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1beta1.ReplicaSetStatus, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1beta1.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas @@ -1865,9 +1754,9 @@ func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *ext return nil } -// Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus is an autogenerated conversion function. 
-func Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1beta1.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s) +// Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus is an autogenerated conversion function. +func Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1beta1.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s) } func autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *v1beta1.ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { @@ -1888,48 +1777,70 @@ func Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControl return autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in, out, s) } -func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *v1beta1.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { +func autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *v1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil } -// Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig is an autogenerated conversion function. -func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *v1beta1.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { - return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s) +// Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig is an autogenerated conversion function. +func Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *v1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error { + return autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in, out, s) } -func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { +func autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil } -// Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function. -func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { - return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) +// Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function. 
+func Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { + return autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) } -func autoConvert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *v1beta1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func autoConvert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1beta1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *v1beta1.RollingUpdateDaemonSet, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1beta1.RollingUpdateDaemonSet, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } +func autoConvert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in *v1beta1.RunAsGroupStrategyOptions, out *policy.RunAsGroupStrategyOptions, s conversion.Scope) error { + out.Rule = policy.RunAsGroupStrategy(in.Rule) + out.Ranges = *(*[]policy.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +// Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions is an autogenerated conversion function. 
+func Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in *v1beta1.RunAsGroupStrategyOptions, out *policy.RunAsGroupStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in, out, s) +} + +func autoConvert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in *policy.RunAsGroupStrategyOptions, out *v1beta1.RunAsGroupStrategyOptions, s conversion.Scope) error { + out.Rule = v1beta1.RunAsGroupStrategy(in.Rule) + out.Ranges = *(*[]v1beta1.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +// Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions is an autogenerated conversion function. +func Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in *policy.RunAsGroupStrategyOptions, out *v1beta1.RunAsGroupStrategyOptions, s conversion.Scope) error { + return autoConvert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in, out, s) +} + func autoConvert_v1beta1_RunAsUserStrategyOptions_To_policy_RunAsUserStrategyOptions(in *v1beta1.RunAsUserStrategyOptions, out *policy.RunAsUserStrategyOptions, s conversion.Scope) error { out.Rule = policy.RunAsUserStrategy(in.Rule) out.Ranges = *(*[]policy.IDRange)(unsafe.Pointer(&in.Ranges)) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go index befc3a71d63f9..390f43280798f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go @@ -21,444 +21,9 @@ limitations under the License. package extensions import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) { - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus. -func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList. -func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) { - *out = *in - out.TargetValue = in.TargetValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget. 
-func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget { - if in == nil { - return nil - } - out := new(CustomMetricTarget) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricTarget, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList. -func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList { - if in == nil { - return nil - } - out := new(CustomMetricTargetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. -func (in *DaemonSet) DeepCopy() *DaemonSet { - if in == nil { - return nil - } - out := new(DaemonSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DaemonSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. -func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { - if in == nil { - return nil - } - out := new(DaemonSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DaemonSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. -func (in *DaemonSetList) DeepCopy() *DaemonSetList { - if in == nil { - return nil - } - out := new(DaemonSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DaemonSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. -func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { - if in == nil { - return nil - } - out := new(DaemonSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { - *out = *in - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DaemonSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. -func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { - if in == nil { - return nil - } - out := new(DaemonSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDaemonSet) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. -func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { - if in == nil { - return nil - } - out := new(DaemonSetUpdateStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Deployment) DeepCopyInto(out *Deployment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. -func (in *Deployment) DeepCopy() *Deployment { - if in == nil { - return nil - } - out := new(Deployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Deployment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. 
-func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { - if in == nil { - return nil - } - out := new(DeploymentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. -func (in *DeploymentList) DeepCopy() *DeploymentList { - if in == nil { - return nil - } - out := new(DeploymentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.UpdatedAnnotations != nil { - in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.RollbackTo = in.RollbackTo - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. -func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { - if in == nil { - return nil - } - out := new(DeploymentRollback) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentRollback) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - in.Strategy.DeepCopyInto(&out.Strategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.RollbackTo != nil { - in, out := &in.RollbackTo, &out.RollbackTo - *out = new(RollbackConfig) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. -func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { - if in == nil { - return nil - } - out := new(DeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DeploymentCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. -func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { - if in == nil { - return nil - } - out := new(DeploymentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. -func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { - if in == nil { - return nil - } - out := new(DeploymentStrategy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in @@ -686,129 +251,6 @@ func (in *IngressTLS) DeepCopy() *IngressTLS { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. -func (in *ReplicaSet) DeepCopy() *ReplicaSet { - if in == nil { - return nil - } - out := new(ReplicaSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. -func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { - if in == nil { - return nil - } - out := new(ReplicaSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicaSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. 
-func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { - if in == nil { - return nil - } - out := new(ReplicaSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. -func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { - if in == nil { - return nil - } - out := new(ReplicaSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicaSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. -func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { - if in == nil { - return nil - } - out := new(ReplicaSetStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicationControllerDummy) DeepCopyInto(out *ReplicationControllerDummy) { *out = *in @@ -833,54 +275,3 @@ func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. -func (in *RollbackConfig) DeepCopy() *RollbackConfig { - if in == nil { - return nil - } - out := new(RollbackConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { - *out = *in - out.MaxUnavailable = in.MaxUnavailable - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. -func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { - if in == nil { - return nil - } - out := new(RollingUpdateDaemonSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { - *out = *in - out.MaxUnavailable = in.MaxUnavailable - out.MaxSurge = in.MaxSurge - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. 
-func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { - if in == nil { - return nil - } - out := new(RollingUpdateDeployment) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go index 8b013e34867dc..218cd6c2f78f4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=networking.k8s.io + package networking // import "k8s.io/kubernetes/pkg/apis/networking" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD.bazel deleted file mode 100644 index 4d879b7de6c0b..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/apis/networking/install", - importpath = "k8s.io/kubernetes/pkg/apis/networking/install", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/networking:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/networking/v1:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/install/install.go deleted file mode 100644 index 4cef1acaffc0f..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/install/install.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. 
-package install - -import ( - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/networking" - "k8s.io/kubernetes/pkg/apis/networking/v1" -) - -func init() { - Install(legacyscheme.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(scheme *runtime.Scheme) { - utilruntime.Must(networking.AddToScheme(scheme)) - utilruntime.Must(v1.AddToScheme(scheme)) - utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD.bazel deleted file mode 100644 index 94b9d50c773d3..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "register.go", - "zz_generated.conversion.go", - "zz_generated.defaults.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/apis/networking/v1", - importpath = "k8s.io/kubernetes/pkg/apis/networking/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/networking/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/networking:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/defaults.go deleted file mode 100644 index fce71ce711416..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/defaults.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func SetDefaults_NetworkPolicyPort(obj *networkingv1.NetworkPolicyPort) { - // Default any undefined Protocol fields to TCP. - if obj.Protocol == nil { - proto := v1.ProtocolTCP - obj.Protocol = &proto - } -} - -func SetDefaults_NetworkPolicy(obj *networkingv1.NetworkPolicy) { - if len(obj.Spec.PolicyTypes) == 0 { - // Any policy that does not specify policyTypes implies at least "Ingress". 
- obj.Spec.PolicyTypes = []networkingv1.PolicyType{networkingv1.PolicyTypeIngress} - if len(obj.Spec.Egress) != 0 { - obj.Spec.PolicyTypes = append(obj.Spec.PolicyTypes, networkingv1.PolicyTypeEgress) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go deleted file mode 100644 index f53cbf3dc65b1..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/networking -// +k8s:conversion-gen-external-types=k8s.io/api/networking/v1 -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/networking/v1 -// +groupName=networking.k8s.io -package v1 // import "k8s.io/kubernetes/pkg/apis/networking/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/zz_generated.conversion.go deleted file mode 100644 index fb1b7dbf7ab7a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/zz_generated.conversion.go +++ /dev/null @@ -1,310 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1 - -import ( - unsafe "unsafe" - - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" - core "k8s.io/kubernetes/pkg/apis/core" - networking "k8s.io/kubernetes/pkg/apis/networking" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1.IPBlock)(nil), (*networking.IPBlock)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_IPBlock_To_networking_IPBlock(a.(*v1.IPBlock), b.(*networking.IPBlock), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.IPBlock)(nil), (*v1.IPBlock)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_IPBlock_To_v1_IPBlock(a.(*networking.IPBlock), b.(*v1.IPBlock), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.NetworkPolicy)(nil), (*networking.NetworkPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NetworkPolicy_To_networking_NetworkPolicy(a.(*v1.NetworkPolicy), b.(*networking.NetworkPolicy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicy)(nil), (*v1.NetworkPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicy_To_v1_NetworkPolicy(a.(*networking.NetworkPolicy), b.(*v1.NetworkPolicy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.NetworkPolicyEgressRule)(nil), (*networking.NetworkPolicyEgressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(a.(*v1.NetworkPolicyEgressRule), b.(*networking.NetworkPolicyEgressRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyEgressRule)(nil), (*v1.NetworkPolicyEgressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicyEgressRule_To_v1_NetworkPolicyEgressRule(a.(*networking.NetworkPolicyEgressRule), b.(*v1.NetworkPolicyEgressRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.NetworkPolicyIngressRule)(nil), (*networking.NetworkPolicyIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(a.(*v1.NetworkPolicyIngressRule), b.(*networking.NetworkPolicyIngressRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyIngressRule)(nil), (*v1.NetworkPolicyIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicyIngressRule_To_v1_NetworkPolicyIngressRule(a.(*networking.NetworkPolicyIngressRule), b.(*v1.NetworkPolicyIngressRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.NetworkPolicyList)(nil), (*networking.NetworkPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NetworkPolicyList_To_networking_NetworkPolicyList(a.(*v1.NetworkPolicyList), b.(*networking.NetworkPolicyList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyList)(nil), (*v1.NetworkPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicyList_To_v1_NetworkPolicyList(a.(*networking.NetworkPolicyList), b.(*v1.NetworkPolicyList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.NetworkPolicyPeer)(nil), (*networking.NetworkPolicyPeer)(nil), func(a, b 
interface{}, scope conversion.Scope) error { - return Convert_v1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(a.(*v1.NetworkPolicyPeer), b.(*networking.NetworkPolicyPeer), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyPeer)(nil), (*v1.NetworkPolicyPeer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer(a.(*networking.NetworkPolicyPeer), b.(*v1.NetworkPolicyPeer), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.NetworkPolicyPort)(nil), (*networking.NetworkPolicyPort)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort(a.(*v1.NetworkPolicyPort), b.(*networking.NetworkPolicyPort), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyPort)(nil), (*v1.NetworkPolicyPort)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicyPort_To_v1_NetworkPolicyPort(a.(*networking.NetworkPolicyPort), b.(*v1.NetworkPolicyPort), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.NetworkPolicySpec)(nil), (*networking.NetworkPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec(a.(*v1.NetworkPolicySpec), b.(*networking.NetworkPolicySpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicySpec)(nil), (*v1.NetworkPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec(a.(*networking.NetworkPolicySpec), b.(*v1.NetworkPolicySpec), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1_IPBlock_To_networking_IPBlock(in *v1.IPBlock, out *networking.IPBlock, s conversion.Scope) error { - out.CIDR = in.CIDR - out.Except = *(*[]string)(unsafe.Pointer(&in.Except)) - return nil -} - -// Convert_v1_IPBlock_To_networking_IPBlock is an autogenerated conversion function. -func Convert_v1_IPBlock_To_networking_IPBlock(in *v1.IPBlock, out *networking.IPBlock, s conversion.Scope) error { - return autoConvert_v1_IPBlock_To_networking_IPBlock(in, out, s) -} - -func autoConvert_networking_IPBlock_To_v1_IPBlock(in *networking.IPBlock, out *v1.IPBlock, s conversion.Scope) error { - out.CIDR = in.CIDR - out.Except = *(*[]string)(unsafe.Pointer(&in.Except)) - return nil -} - -// Convert_networking_IPBlock_To_v1_IPBlock is an autogenerated conversion function. -func Convert_networking_IPBlock_To_v1_IPBlock(in *networking.IPBlock, out *v1.IPBlock, s conversion.Scope) error { - return autoConvert_networking_IPBlock_To_v1_IPBlock(in, out, s) -} - -func autoConvert_v1_NetworkPolicy_To_networking_NetworkPolicy(in *v1.NetworkPolicy, out *networking.NetworkPolicy, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1_NetworkPolicy_To_networking_NetworkPolicy is an autogenerated conversion function. 
-func Convert_v1_NetworkPolicy_To_networking_NetworkPolicy(in *v1.NetworkPolicy, out *networking.NetworkPolicy, s conversion.Scope) error { - return autoConvert_v1_NetworkPolicy_To_networking_NetworkPolicy(in, out, s) -} - -func autoConvert_networking_NetworkPolicy_To_v1_NetworkPolicy(in *networking.NetworkPolicy, out *v1.NetworkPolicy, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_networking_NetworkPolicy_To_v1_NetworkPolicy is an autogenerated conversion function. -func Convert_networking_NetworkPolicy_To_v1_NetworkPolicy(in *networking.NetworkPolicy, out *v1.NetworkPolicy, s conversion.Scope) error { - return autoConvert_networking_NetworkPolicy_To_v1_NetworkPolicy(in, out, s) -} - -func autoConvert_v1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(in *v1.NetworkPolicyEgressRule, out *networking.NetworkPolicyEgressRule, s conversion.Scope) error { - out.Ports = *(*[]networking.NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) - out.To = *(*[]networking.NetworkPolicyPeer)(unsafe.Pointer(&in.To)) - return nil -} - -// Convert_v1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule is an autogenerated conversion function. -func Convert_v1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(in *v1.NetworkPolicyEgressRule, out *networking.NetworkPolicyEgressRule, s conversion.Scope) error { - return autoConvert_v1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(in, out, s) -} - -func autoConvert_networking_NetworkPolicyEgressRule_To_v1_NetworkPolicyEgressRule(in *networking.NetworkPolicyEgressRule, out *v1.NetworkPolicyEgressRule, s conversion.Scope) error { - out.Ports = *(*[]v1.NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) - out.To = *(*[]v1.NetworkPolicyPeer)(unsafe.Pointer(&in.To)) - return nil -} - -// Convert_networking_NetworkPolicyEgressRule_To_v1_NetworkPolicyEgressRule is an autogenerated conversion function. -func Convert_networking_NetworkPolicyEgressRule_To_v1_NetworkPolicyEgressRule(in *networking.NetworkPolicyEgressRule, out *v1.NetworkPolicyEgressRule, s conversion.Scope) error { - return autoConvert_networking_NetworkPolicyEgressRule_To_v1_NetworkPolicyEgressRule(in, out, s) -} - -func autoConvert_v1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(in *v1.NetworkPolicyIngressRule, out *networking.NetworkPolicyIngressRule, s conversion.Scope) error { - out.Ports = *(*[]networking.NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) - out.From = *(*[]networking.NetworkPolicyPeer)(unsafe.Pointer(&in.From)) - return nil -} - -// Convert_v1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule is an autogenerated conversion function. 
-func Convert_v1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(in *v1.NetworkPolicyIngressRule, out *networking.NetworkPolicyIngressRule, s conversion.Scope) error { - return autoConvert_v1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(in, out, s) -} - -func autoConvert_networking_NetworkPolicyIngressRule_To_v1_NetworkPolicyIngressRule(in *networking.NetworkPolicyIngressRule, out *v1.NetworkPolicyIngressRule, s conversion.Scope) error { - out.Ports = *(*[]v1.NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) - out.From = *(*[]v1.NetworkPolicyPeer)(unsafe.Pointer(&in.From)) - return nil -} - -// Convert_networking_NetworkPolicyIngressRule_To_v1_NetworkPolicyIngressRule is an autogenerated conversion function. -func Convert_networking_NetworkPolicyIngressRule_To_v1_NetworkPolicyIngressRule(in *networking.NetworkPolicyIngressRule, out *v1.NetworkPolicyIngressRule, s conversion.Scope) error { - return autoConvert_networking_NetworkPolicyIngressRule_To_v1_NetworkPolicyIngressRule(in, out, s) -} - -func autoConvert_v1_NetworkPolicyList_To_networking_NetworkPolicyList(in *v1.NetworkPolicyList, out *networking.NetworkPolicyList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]networking.NetworkPolicy)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_NetworkPolicyList_To_networking_NetworkPolicyList is an autogenerated conversion function. -func Convert_v1_NetworkPolicyList_To_networking_NetworkPolicyList(in *v1.NetworkPolicyList, out *networking.NetworkPolicyList, s conversion.Scope) error { - return autoConvert_v1_NetworkPolicyList_To_networking_NetworkPolicyList(in, out, s) -} - -func autoConvert_networking_NetworkPolicyList_To_v1_NetworkPolicyList(in *networking.NetworkPolicyList, out *v1.NetworkPolicyList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.NetworkPolicy)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_networking_NetworkPolicyList_To_v1_NetworkPolicyList is an autogenerated conversion function. -func Convert_networking_NetworkPolicyList_To_v1_NetworkPolicyList(in *networking.NetworkPolicyList, out *v1.NetworkPolicyList, s conversion.Scope) error { - return autoConvert_networking_NetworkPolicyList_To_v1_NetworkPolicyList(in, out, s) -} - -func autoConvert_v1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(in *v1.NetworkPolicyPeer, out *networking.NetworkPolicyPeer, s conversion.Scope) error { - out.PodSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.PodSelector)) - out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) - out.IPBlock = (*networking.IPBlock)(unsafe.Pointer(in.IPBlock)) - return nil -} - -// Convert_v1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer is an autogenerated conversion function. 
-func Convert_v1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(in *v1.NetworkPolicyPeer, out *networking.NetworkPolicyPeer, s conversion.Scope) error { - return autoConvert_v1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(in, out, s) -} - -func autoConvert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer(in *networking.NetworkPolicyPeer, out *v1.NetworkPolicyPeer, s conversion.Scope) error { - out.PodSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.PodSelector)) - out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) - out.IPBlock = (*v1.IPBlock)(unsafe.Pointer(in.IPBlock)) - return nil -} - -// Convert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer is an autogenerated conversion function. -func Convert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer(in *networking.NetworkPolicyPeer, out *v1.NetworkPolicyPeer, s conversion.Scope) error { - return autoConvert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer(in, out, s) -} - -func autoConvert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in *v1.NetworkPolicyPort, out *networking.NetworkPolicyPort, s conversion.Scope) error { - out.Protocol = (*core.Protocol)(unsafe.Pointer(in.Protocol)) - out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) - return nil -} - -// Convert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort is an autogenerated conversion function. -func Convert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in *v1.NetworkPolicyPort, out *networking.NetworkPolicyPort, s conversion.Scope) error { - return autoConvert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in, out, s) -} - -func autoConvert_networking_NetworkPolicyPort_To_v1_NetworkPolicyPort(in *networking.NetworkPolicyPort, out *v1.NetworkPolicyPort, s conversion.Scope) error { - out.Protocol = (*corev1.Protocol)(unsafe.Pointer(in.Protocol)) - out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) - return nil -} - -// Convert_networking_NetworkPolicyPort_To_v1_NetworkPolicyPort is an autogenerated conversion function. -func Convert_networking_NetworkPolicyPort_To_v1_NetworkPolicyPort(in *networking.NetworkPolicyPort, out *v1.NetworkPolicyPort, s conversion.Scope) error { - return autoConvert_networking_NetworkPolicyPort_To_v1_NetworkPolicyPort(in, out, s) -} - -func autoConvert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in *v1.NetworkPolicySpec, out *networking.NetworkPolicySpec, s conversion.Scope) error { - out.PodSelector = in.PodSelector - out.Ingress = *(*[]networking.NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress)) - out.Egress = *(*[]networking.NetworkPolicyEgressRule)(unsafe.Pointer(&in.Egress)) - out.PolicyTypes = *(*[]networking.PolicyType)(unsafe.Pointer(&in.PolicyTypes)) - return nil -} - -// Convert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec is an autogenerated conversion function. 
-func Convert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in *v1.NetworkPolicySpec, out *networking.NetworkPolicySpec, s conversion.Scope) error { - return autoConvert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in, out, s) -} - -func autoConvert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec(in *networking.NetworkPolicySpec, out *v1.NetworkPolicySpec, s conversion.Scope) error { - out.PodSelector = in.PodSelector - out.Ingress = *(*[]v1.NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress)) - out.Egress = *(*[]v1.NetworkPolicyEgressRule)(unsafe.Pointer(&in.Egress)) - out.PolicyTypes = *(*[]v1.PolicyType)(unsafe.Pointer(&in.PolicyTypes)) - return nil -} - -// Convert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec is an autogenerated conversion function. -func Convert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec(in *networking.NetworkPolicySpec, out *v1.NetworkPolicySpec, s conversion.Scope) error { - return autoConvert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/zz_generated.defaults.go deleted file mode 100644 index 3962a1376b09f..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/zz_generated.defaults.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/networking/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&v1.NetworkPolicy{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicy(obj.(*v1.NetworkPolicy)) }) - scheme.AddTypeDefaultingFunc(&v1.NetworkPolicyList{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicyList(obj.(*v1.NetworkPolicyList)) }) - return nil -} - -func SetObjectDefaults_NetworkPolicy(in *v1.NetworkPolicy) { - SetDefaults_NetworkPolicy(in) - for i := range in.Spec.Ingress { - a := &in.Spec.Ingress[i] - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_NetworkPolicyPort(b) - } - } - for i := range in.Spec.Egress { - a := &in.Spec.Egress[i] - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_NetworkPolicyPort(b) - } - } -} - -func SetObjectDefaults_NetworkPolicyList(in *v1.NetworkPolicyList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_NetworkPolicy(a) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS index a245fde358e92..5780104890ded 100755 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS @@ -1,8 +1,8 @@ -approvers: -- sig-apps-api-approvers +# approval on api packages bubbles to api-approvers reviewers: -- sig-apps-reviewers -- pweil- -- liggitt -- tallclair -- php-coder +- sig-apps-api-approvers +- sig-auth-policy-approvers +- sig-auth-policy-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go index 7b9628657f133..a94711bdb36c6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go @@ -182,6 +182,10 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions // RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + RunAsGroup *RunAsGroupStrategyOptions // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. SupplementalGroups SupplementalGroupsStrategyOptions // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -335,6 +339,16 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsGroupStrategyOptions struct { + // Rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + Rule RunAsGroupStrategy + // Ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange +} + // IDRange provides a min/max of an allowed range of IDs. type IDRange struct { // Min is the start of the range, inclusive. @@ -356,6 +370,20 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) +// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a +// SecurityContext. +type RunAsGroupStrategy string + +const ( + // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. 
+ // However, when RunAsGroup are specified, they have to fall in the defined range. + RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" + // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. + RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" + // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. + RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" +) + // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. type FSGroupStrategyOptions struct { // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. @@ -372,6 +400,9 @@ type FSGroupStrategyOptions struct { type FSGroupStrategyType string const ( + // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. + // However, when FSGroups are specified, they have to fall in the defined range. + FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" // FSGroupStrategyMustRunAs means that container must have FSGroup of X applied. FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. @@ -394,6 +425,9 @@ type SupplementalGroupsStrategyOptions struct { type SupplementalGroupsStrategyType string const ( + // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when gids are specified, they have to fall in the defined range. + SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. 
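For context on the RunAsGroup additions in the policy types.go hunk above: a minimal sketch of how the new optional field might be populated, assuming the vendored internal package k8s.io/kubernetes/pkg/apis/policy is importable from this tree; the gid range values are invented for illustration and are not taken from the diff.

```go
// Sketch only: shows the shape of the RunAsGroup field added to
// PodSecurityPolicySpec in the hunk above. Assumes the vendored internal
// policy package is importable; the gid values are arbitrary examples.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/policy"
)

func main() {
	spec := policy.PodSecurityPolicySpec{
		// MustRunAs requires container gids to fall inside the listed ranges;
		// MayRunAs and RunAsAny are the other strategies defined in the hunk.
		RunAsGroup: &policy.RunAsGroupStrategyOptions{
			Rule: policy.RunAsGroupStrategyMustRunAs,
			Ranges: []policy.IDRange{
				{Min: 1000, Max: 2000}, // example range; use a single-element range to pin one gid
			},
		},
	}
	fmt.Printf("RunAsGroup rule: %s, ranges: %v\n", spec.RunAsGroup.Rule, spec.RunAsGroup.Ranges)
}
```

Per the comments added in the hunk, MayRunAs only validates gids against Ranges when a RunAsGroup is actually specified, while RunAsAny places no restriction; structurally all three strategies use the same RunAsGroupStrategyOptions value.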
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go index 572cbfe1b0faa..356cb47f3a4f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go @@ -170,6 +170,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1beta1.RunAsGroupStrategyOptions)(nil), (*policy.RunAsGroupStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(a.(*v1beta1.RunAsGroupStrategyOptions), b.(*policy.RunAsGroupStrategyOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*policy.RunAsGroupStrategyOptions)(nil), (*v1beta1.RunAsGroupStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(a.(*policy.RunAsGroupStrategyOptions), b.(*v1beta1.RunAsGroupStrategyOptions), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*v1beta1.RunAsUserStrategyOptions)(nil), (*policy.RunAsUserStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_RunAsUserStrategyOptions_To_policy_RunAsUserStrategyOptions(a.(*v1beta1.RunAsUserStrategyOptions), b.(*policy.RunAsUserStrategyOptions), scope) }); err != nil { @@ -525,6 +535,7 @@ func autoConvert_v1beta1_PodSecurityPolicySpec_To_policy_PodSecurityPolicySpec(i if err := Convert_v1beta1_RunAsUserStrategyOptions_To_policy_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { return err } + out.RunAsGroup = (*policy.RunAsGroupStrategyOptions)(unsafe.Pointer(in.RunAsGroup)) if err := Convert_v1beta1_SupplementalGroupsStrategyOptions_To_policy_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { return err } @@ -565,6 +576,7 @@ func autoConvert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(i if err := Convert_policy_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { return err } + out.RunAsGroup = (*v1beta1.RunAsGroupStrategyOptions)(unsafe.Pointer(in.RunAsGroup)) if err := Convert_policy_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { return err } @@ -589,6 +601,28 @@ func Convert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *p return autoConvert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s) } +func autoConvert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in *v1beta1.RunAsGroupStrategyOptions, out *policy.RunAsGroupStrategyOptions, s conversion.Scope) error { + out.Rule = policy.RunAsGroupStrategy(in.Rule) + out.Ranges = *(*[]policy.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +// Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions is an autogenerated conversion function. 
+func Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in *v1beta1.RunAsGroupStrategyOptions, out *policy.RunAsGroupStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in, out, s) +} + +func autoConvert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in *policy.RunAsGroupStrategyOptions, out *v1beta1.RunAsGroupStrategyOptions, s conversion.Scope) error { + out.Rule = v1beta1.RunAsGroupStrategy(in.Rule) + out.Ranges = *(*[]v1beta1.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +// Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions is an autogenerated conversion function. +func Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in *policy.RunAsGroupStrategyOptions, out *v1beta1.RunAsGroupStrategyOptions, s conversion.Scope) error { + return autoConvert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in, out, s) +} + func autoConvert_v1beta1_RunAsUserStrategyOptions_To_policy_RunAsUserStrategyOptions(in *v1beta1.RunAsUserStrategyOptions, out *policy.RunAsUserStrategyOptions, s conversion.Scope) error { out.Rule = policy.RunAsUserStrategy(in.Rule) out.Ranges = *(*[]policy.IDRange)(unsafe.Pointer(&in.Ranges)) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go index b0cfe75289070..4b5eb45794642 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go @@ -348,6 +348,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -393,6 +398,27 @@ func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. +func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS index 1aefde049a0a2..ff4a7f4bf9ad0 100755 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS @@ -1,17 +1,7 @@ +# approval on api packages bubbles to api-approvers reviewers: -- thockin -- lavalamp -- smarterclayton -- deads2k -- sttts -- ncdc -- dims -- krousey -- mml -- mbohlool -- david-mcmahon -- ericchiang -- lixiaobing10051267 -- jianhuiz -- liggitt -- enj +- sig-auth-authorizers-approvers +- sig-auth-authorizers-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go index bebcb771d12eb..ea2309eea74b9 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=rbac.authorization.k8s.io + package rbac // import "k8s.io/kubernetes/pkg/apis/rbac" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go index 5608caba7055b..b96dc30bc51d9 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go @@ -21,4 +21,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=rbac.authorization.k8s.io + package v1 // import "k8s.io/kubernetes/pkg/apis/rbac/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go index 365f388143656..67aacff933bee 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/rbac/v1alpha1 // +groupName=rbac.authorization.k8s.io + package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go index 7ba759013a2b7..854453e8df13d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/rbac/v1beta1 // +groupName=rbac.authorization.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go index f2745c227e265..bab0ae332af39 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=scheduling.k8s.io + package scheduling // import "k8s.io/kubernetes/pkg/apis/scheduling" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go index e2bf21c769726..f314950470595 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +groupName=scheduling.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/scheduling/v1alpha1 + package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/doc.go index 117381f65f431..ba57c832261a9 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=scheduling.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/scheduling/v1beta1 + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/scheduling/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go index 6093e3e824140..297432dceb467 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=settings.k8s.io + package settings // import "k8s.io/kubernetes/pkg/apis/settings" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go index 4422bb3e77b7f..998e91f833ccc 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/settings/v1alpha1 // +groupName=settings.k8s.io + package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/settings/v1alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go index 5220b981e4740..52b2c2d822c65 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=storage.k8s.io + package storage // import "k8s.io/kubernetes/pkg/apis/storage" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go index 617aa14c1aa03..d46a103b9a641 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1 -package v1 + +package v1 // import "k8s.io/kubernetes/pkg/apis/storage/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go index be7a27582e619..436a862671930 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go @@ -58,6 +58,66 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1.VolumeAttachment)(nil), (*storage.VolumeAttachment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeAttachment_To_storage_VolumeAttachment(a.(*v1.VolumeAttachment), b.(*storage.VolumeAttachment), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachment)(nil), (*v1.VolumeAttachment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeAttachment_To_v1_VolumeAttachment(a.(*storage.VolumeAttachment), b.(*v1.VolumeAttachment), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.VolumeAttachmentList)(nil), (*storage.VolumeAttachmentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(a.(*v1.VolumeAttachmentList), b.(*storage.VolumeAttachmentList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentList)(nil), (*v1.VolumeAttachmentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(a.(*storage.VolumeAttachmentList), b.(*v1.VolumeAttachmentList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.VolumeAttachmentSource)(nil), (*storage.VolumeAttachmentSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(a.(*v1.VolumeAttachmentSource), b.(*storage.VolumeAttachmentSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentSource)(nil), (*v1.VolumeAttachmentSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(a.(*storage.VolumeAttachmentSource), b.(*v1.VolumeAttachmentSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.VolumeAttachmentSpec)(nil), (*storage.VolumeAttachmentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(a.(*v1.VolumeAttachmentSpec), b.(*storage.VolumeAttachmentSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentSpec)(nil), (*v1.VolumeAttachmentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(a.(*storage.VolumeAttachmentSpec), b.(*v1.VolumeAttachmentSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.VolumeAttachmentStatus)(nil), (*storage.VolumeAttachmentStatus)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(a.(*v1.VolumeAttachmentStatus), b.(*storage.VolumeAttachmentStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentStatus)(nil), (*v1.VolumeAttachmentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(a.(*storage.VolumeAttachmentStatus), b.(*v1.VolumeAttachmentStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.VolumeError)(nil), (*storage.VolumeError)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeError_To_storage_VolumeError(a.(*v1.VolumeError), b.(*storage.VolumeError), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeError)(nil), (*v1.VolumeError)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeError_To_v1_VolumeError(a.(*storage.VolumeError), b.(*v1.VolumeError), scope) + }); err != nil { + return err + } return nil } @@ -116,3 +176,153 @@ func autoConvert_storage_StorageClassList_To_v1_StorageClassList(in *storage.Sto func Convert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *v1.StorageClassList, s conversion.Scope) error { return autoConvert_storage_StorageClassList_To_v1_StorageClassList(in, out, s) } + +func autoConvert_v1_VolumeAttachment_To_storage_VolumeAttachment(in *v1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_VolumeAttachment_To_storage_VolumeAttachment is an autogenerated conversion function. +func Convert_v1_VolumeAttachment_To_storage_VolumeAttachment(in *v1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error { + return autoConvert_v1_VolumeAttachment_To_storage_VolumeAttachment(in, out, s) +} + +func autoConvert_storage_VolumeAttachment_To_v1_VolumeAttachment(in *storage.VolumeAttachment, out *v1.VolumeAttachment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_storage_VolumeAttachment_To_v1_VolumeAttachment is an autogenerated conversion function. +func Convert_storage_VolumeAttachment_To_v1_VolumeAttachment(in *storage.VolumeAttachment, out *v1.VolumeAttachment, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachment_To_v1_VolumeAttachment(in, out, s) +} + +func autoConvert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *v1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]storage.VolumeAttachment)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList is an autogenerated conversion function. 
+func Convert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *v1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error { + return autoConvert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *v1.VolumeAttachmentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.VolumeAttachment)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList is an autogenerated conversion function. +func Convert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *v1.VolumeAttachmentList, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(in, out, s) +} + +func autoConvert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *v1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error { + out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName)) + return nil +} + +// Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource is an autogenerated conversion function. +func Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *v1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error { + return autoConvert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *v1.VolumeAttachmentSource, s conversion.Scope) error { + out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName)) + return nil +} + +// Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource is an autogenerated conversion function. +func Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *v1.VolumeAttachmentSource, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(in, out, s) +} + +func autoConvert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *v1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error { + out.Attacher = in.Attacher + if err := Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.NodeName = in.NodeName + return nil +} + +// Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec is an autogenerated conversion function. +func Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *v1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error { + return autoConvert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *v1.VolumeAttachmentSpec, s conversion.Scope) error { + out.Attacher = in.Attacher + if err := Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.NodeName = in.NodeName + return nil +} + +// Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec is an autogenerated conversion function. 
+func Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *v1.VolumeAttachmentSpec, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(in, out, s) +} + +func autoConvert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *v1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error { + out.Attached = in.Attached + out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata)) + out.AttachError = (*storage.VolumeError)(unsafe.Pointer(in.AttachError)) + out.DetachError = (*storage.VolumeError)(unsafe.Pointer(in.DetachError)) + return nil +} + +// Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus is an autogenerated conversion function. +func Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *v1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error { + return autoConvert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *v1.VolumeAttachmentStatus, s conversion.Scope) error { + out.Attached = in.Attached + out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata)) + out.AttachError = (*v1.VolumeError)(unsafe.Pointer(in.AttachError)) + out.DetachError = (*v1.VolumeError)(unsafe.Pointer(in.DetachError)) + return nil +} + +// Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus is an autogenerated conversion function. +func Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *v1.VolumeAttachmentStatus, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(in, out, s) +} + +func autoConvert_v1_VolumeError_To_storage_VolumeError(in *v1.VolumeError, out *storage.VolumeError, s conversion.Scope) error { + out.Time = in.Time + out.Message = in.Message + return nil +} + +// Convert_v1_VolumeError_To_storage_VolumeError is an autogenerated conversion function. +func Convert_v1_VolumeError_To_storage_VolumeError(in *v1.VolumeError, out *storage.VolumeError, s conversion.Scope) error { + return autoConvert_v1_VolumeError_To_storage_VolumeError(in, out, s) +} + +func autoConvert_storage_VolumeError_To_v1_VolumeError(in *storage.VolumeError, out *v1.VolumeError, s conversion.Scope) error { + out.Time = in.Time + out.Message = in.Message + return nil +} + +// Convert_storage_VolumeError_To_v1_VolumeError is an autogenerated conversion function. +func Convert_storage_VolumeError_To_v1_VolumeError(in *storage.VolumeError, out *v1.VolumeError, s conversion.Scope) error { + return autoConvert_storage_VolumeError_To_v1_VolumeError(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go index 07c766c3a2815..e3df1a906962a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1alpha1 + package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/storage/v1alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go index a5b0ca68ab802..ed4fe193a1170 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1beta1 + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/storage/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD.bazel deleted file mode 100644 index e63bd57129fe0..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", 
- "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go deleted file mode 100644 index 554d26807133f..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go +++ /dev/null @@ -1,314 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalclientset - -import ( - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" - admissionregistrationinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion" - appsinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" - authenticationinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion" - authorizationinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion" - autoscalinginternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion" - batchinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" - certificatesinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion" - coordinationinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion" - coreinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - eventsinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion" - extensionsinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - networkinginternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion" - policyinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion" - rbacinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" - schedulinginternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion" - settingsinternalversion 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion" - storageinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - Admissionregistration() admissionregistrationinternalversion.AdmissionregistrationInterface - Core() coreinternalversion.CoreInterface - Apps() appsinternalversion.AppsInterface - Authentication() authenticationinternalversion.AuthenticationInterface - Authorization() authorizationinternalversion.AuthorizationInterface - Autoscaling() autoscalinginternalversion.AutoscalingInterface - Batch() batchinternalversion.BatchInterface - Certificates() certificatesinternalversion.CertificatesInterface - Coordination() coordinationinternalversion.CoordinationInterface - Events() eventsinternalversion.EventsInterface - Extensions() extensionsinternalversion.ExtensionsInterface - Networking() networkinginternalversion.NetworkingInterface - Policy() policyinternalversion.PolicyInterface - Rbac() rbacinternalversion.RbacInterface - Scheduling() schedulinginternalversion.SchedulingInterface - Settings() settingsinternalversion.SettingsInterface - Storage() storageinternalversion.StorageInterface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. -type Clientset struct { - *discovery.DiscoveryClient - admissionregistration *admissionregistrationinternalversion.AdmissionregistrationClient - core *coreinternalversion.CoreClient - apps *appsinternalversion.AppsClient - authentication *authenticationinternalversion.AuthenticationClient - authorization *authorizationinternalversion.AuthorizationClient - autoscaling *autoscalinginternalversion.AutoscalingClient - batch *batchinternalversion.BatchClient - certificates *certificatesinternalversion.CertificatesClient - coordination *coordinationinternalversion.CoordinationClient - events *eventsinternalversion.EventsClient - extensions *extensionsinternalversion.ExtensionsClient - networking *networkinginternalversion.NetworkingClient - policy *policyinternalversion.PolicyClient - rbac *rbacinternalversion.RbacClient - scheduling *schedulinginternalversion.SchedulingClient - settings *settingsinternalversion.SettingsClient - storage *storageinternalversion.StorageClient -} - -// Admissionregistration retrieves the AdmissionregistrationClient -func (c *Clientset) Admissionregistration() admissionregistrationinternalversion.AdmissionregistrationInterface { - return c.admissionregistration -} - -// Core retrieves the CoreClient -func (c *Clientset) Core() coreinternalversion.CoreInterface { - return c.core -} - -// Apps retrieves the AppsClient -func (c *Clientset) Apps() appsinternalversion.AppsInterface { - return c.apps -} - -// Authentication retrieves the AuthenticationClient -func (c *Clientset) Authentication() authenticationinternalversion.AuthenticationInterface { - return c.authentication -} - -// Authorization retrieves the AuthorizationClient -func (c *Clientset) Authorization() authorizationinternalversion.AuthorizationInterface { - return c.authorization -} - -// Autoscaling retrieves the AutoscalingClient -func (c *Clientset) Autoscaling() autoscalinginternalversion.AutoscalingInterface { - return c.autoscaling -} - -// Batch retrieves the BatchClient -func (c *Clientset) Batch() batchinternalversion.BatchInterface { - return c.batch -} - -// Certificates retrieves the CertificatesClient 
-func (c *Clientset) Certificates() certificatesinternalversion.CertificatesInterface { - return c.certificates -} - -// Coordination retrieves the CoordinationClient -func (c *Clientset) Coordination() coordinationinternalversion.CoordinationInterface { - return c.coordination -} - -// Events retrieves the EventsClient -func (c *Clientset) Events() eventsinternalversion.EventsInterface { - return c.events -} - -// Extensions retrieves the ExtensionsClient -func (c *Clientset) Extensions() extensionsinternalversion.ExtensionsInterface { - return c.extensions -} - -// Networking retrieves the NetworkingClient -func (c *Clientset) Networking() networkinginternalversion.NetworkingInterface { - return c.networking -} - -// Policy retrieves the PolicyClient -func (c *Clientset) Policy() policyinternalversion.PolicyInterface { - return c.policy -} - -// Rbac retrieves the RbacClient -func (c *Clientset) Rbac() rbacinternalversion.RbacInterface { - return c.rbac -} - -// Scheduling retrieves the SchedulingClient -func (c *Clientset) Scheduling() schedulinginternalversion.SchedulingInterface { - return c.scheduling -} - -// Settings retrieves the SettingsClient -func (c *Clientset) Settings() settingsinternalversion.SettingsInterface { - return c.settings -} - -// Storage retrieves the StorageClient -func (c *Clientset) Storage() storageinternalversion.StorageInterface { - return c.storage -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.admissionregistration, err = admissionregistrationinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.core, err = coreinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.apps, err = appsinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.authentication, err = authenticationinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.authorization, err = authorizationinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.autoscaling, err = autoscalinginternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.batch, err = batchinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.certificates, err = certificatesinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.coordination, err = coordinationinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.events, err = eventsinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.extensions, err = extensionsinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.networking, err = networkinginternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.policy, err = policyinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.rbac, 
err = rbacinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.scheduling, err = schedulinginternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.settings, err = settingsinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.storage, err = storageinternalversion.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.admissionregistration = admissionregistrationinternalversion.NewForConfigOrDie(c) - cs.core = coreinternalversion.NewForConfigOrDie(c) - cs.apps = appsinternalversion.NewForConfigOrDie(c) - cs.authentication = authenticationinternalversion.NewForConfigOrDie(c) - cs.authorization = authorizationinternalversion.NewForConfigOrDie(c) - cs.autoscaling = autoscalinginternalversion.NewForConfigOrDie(c) - cs.batch = batchinternalversion.NewForConfigOrDie(c) - cs.certificates = certificatesinternalversion.NewForConfigOrDie(c) - cs.coordination = coordinationinternalversion.NewForConfigOrDie(c) - cs.events = eventsinternalversion.NewForConfigOrDie(c) - cs.extensions = extensionsinternalversion.NewForConfigOrDie(c) - cs.networking = networkinginternalversion.NewForConfigOrDie(c) - cs.policy = policyinternalversion.NewForConfigOrDie(c) - cs.rbac = rbacinternalversion.NewForConfigOrDie(c) - cs.scheduling = schedulinginternalversion.NewForConfigOrDie(c) - cs.settings = settingsinternalversion.NewForConfigOrDie(c) - cs.storage = storageinternalversion.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.admissionregistration = admissionregistrationinternalversion.New(c) - cs.core = coreinternalversion.New(c) - cs.apps = appsinternalversion.New(c) - cs.authentication = authenticationinternalversion.New(c) - cs.authorization = authorizationinternalversion.New(c) - cs.autoscaling = autoscalinginternalversion.New(c) - cs.batch = batchinternalversion.New(c) - cs.certificates = certificatesinternalversion.New(c) - cs.coordination = coordinationinternalversion.New(c) - cs.events = eventsinternalversion.New(c) - cs.extensions = extensionsinternalversion.New(c) - cs.networking = networkinginternalversion.New(c) - cs.policy = policyinternalversion.New(c) - cs.rbac = rbacinternalversion.New(c) - cs.scheduling = schedulinginternalversion.New(c) - cs.settings = settingsinternalversion.New(c) - cs.storage = storageinternalversion.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go deleted file mode 100644 index 727d0e2995f84..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration/install" - apps "k8s.io/kubernetes/pkg/apis/apps/install" - authentication "k8s.io/kubernetes/pkg/apis/authentication/install" - authorization "k8s.io/kubernetes/pkg/apis/authorization/install" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/install" - batch "k8s.io/kubernetes/pkg/apis/batch/install" - certificates "k8s.io/kubernetes/pkg/apis/certificates/install" - coordination "k8s.io/kubernetes/pkg/apis/coordination/install" - core "k8s.io/kubernetes/pkg/apis/core/install" - events "k8s.io/kubernetes/pkg/apis/events/install" - extensions "k8s.io/kubernetes/pkg/apis/extensions/install" - networking "k8s.io/kubernetes/pkg/apis/networking/install" - policy "k8s.io/kubernetes/pkg/apis/policy/install" - rbac "k8s.io/kubernetes/pkg/apis/rbac/install" - scheduling "k8s.io/kubernetes/pkg/apis/scheduling/install" - settings "k8s.io/kubernetes/pkg/apis/settings/install" - storage "k8s.io/kubernetes/pkg/apis/storage/install" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - Install(Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(scheme *runtime.Scheme) { - admissionregistration.Install(scheme) - core.Install(scheme) - apps.Install(scheme) - authentication.Install(scheme) - authorization.Install(scheme) - autoscaling.Install(scheme) - batch.Install(scheme) - certificates.Install(scheme) - coordination.Install(scheme) - events.Install(scheme) - extensions.Install(scheme) - networking.Install(scheme) - policy.Install(scheme) - rbac.Install(scheme) - scheduling.Install(scheme) - settings.Install(scheme) - storage.Install(scheme) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD.bazel deleted file mode 100644 index 630516e7e61ea..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "admissionregistration_client.go", - "doc.go", - "generated_expansion.go", - "initializerconfiguration.go", - "mutatingwebhookconfiguration.go", - "validatingwebhookconfiguration.go", - ], - importmap = 
"k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/admissionregistration:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go deleted file mode 100644 index 9ecefad4c96d5..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type AdmissionregistrationInterface interface { - RESTClient() rest.Interface - InitializerConfigurationsGetter - MutatingWebhookConfigurationsGetter - ValidatingWebhookConfigurationsGetter -} - -// AdmissionregistrationClient is used to interact with features provided by the admissionregistration.k8s.io group. -type AdmissionregistrationClient struct { - restClient rest.Interface -} - -func (c *AdmissionregistrationClient) InitializerConfigurations() InitializerConfigurationInterface { - return newInitializerConfigurations(c) -} - -func (c *AdmissionregistrationClient) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { - return newMutatingWebhookConfigurations(c) -} - -func (c *AdmissionregistrationClient) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { - return newValidatingWebhookConfigurations(c) -} - -// NewForConfig creates a new AdmissionregistrationClient for the given config. -func NewForConfig(c *rest.Config) (*AdmissionregistrationClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AdmissionregistrationClient{client}, nil -} - -// NewForConfigOrDie creates a new AdmissionregistrationClient for the given config and -// panics if there is an error in the config. 
-func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AdmissionregistrationClient for the given RESTClient. -func New(c rest.Interface) *AdmissionregistrationClient { - return &AdmissionregistrationClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("admissionregistration.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("admissionregistration.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AdmissionregistrationClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go deleted file mode 100644 index b792b5470a35b..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// InitializerConfigurationsGetter has a method to return a InitializerConfigurationInterface. -// A group's client should implement this interface. -type InitializerConfigurationsGetter interface { - InitializerConfigurations() InitializerConfigurationInterface -} - -// InitializerConfigurationInterface has methods to work with InitializerConfiguration resources. 
-type InitializerConfigurationInterface interface { - Create(*admissionregistration.InitializerConfiguration) (*admissionregistration.InitializerConfiguration, error) - Update(*admissionregistration.InitializerConfiguration) (*admissionregistration.InitializerConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*admissionregistration.InitializerConfiguration, error) - List(opts v1.ListOptions) (*admissionregistration.InitializerConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.InitializerConfiguration, err error) - InitializerConfigurationExpansion -} - -// initializerConfigurations implements InitializerConfigurationInterface -type initializerConfigurations struct { - client rest.Interface -} - -// newInitializerConfigurations returns a InitializerConfigurations -func newInitializerConfigurations(c *AdmissionregistrationClient) *initializerConfigurations { - return &initializerConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the initializerConfiguration, and returns the corresponding initializerConfiguration object, and an error if there is any. -func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (result *admissionregistration.InitializerConfiguration, err error) { - result = &admissionregistration.InitializerConfiguration{} - err = c.client.Get(). - Resource("initializerconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. -func (c *initializerConfigurations) List(opts v1.ListOptions) (result *admissionregistration.InitializerConfigurationList, err error) { - result = &admissionregistration.InitializerConfigurationList{} - err = c.client.Get(). - Resource("initializerconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested initializerConfigurations. -func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("initializerconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a initializerConfiguration and creates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. -func (c *initializerConfigurations) Create(initializerConfiguration *admissionregistration.InitializerConfiguration) (result *admissionregistration.InitializerConfiguration, err error) { - result = &admissionregistration.InitializerConfiguration{} - err = c.client.Post(). - Resource("initializerconfigurations"). - Body(initializerConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a initializerConfiguration and updates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. 
-func (c *initializerConfigurations) Update(initializerConfiguration *admissionregistration.InitializerConfiguration) (result *admissionregistration.InitializerConfiguration, err error) { - result = &admissionregistration.InitializerConfiguration{} - err = c.client.Put(). - Resource("initializerconfigurations"). - Name(initializerConfiguration.Name). - Body(initializerConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the initializerConfiguration and deletes it. Returns an error if one occurs. -func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("initializerconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("initializerconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched initializerConfiguration. -func (c *initializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.InitializerConfiguration, err error) { - result = &admissionregistration.InitializerConfiguration{} - err = c.client.Patch(pt). - Resource("initializerconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go deleted file mode 100644 index c35d780fbfe9f..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. -// A group's client should implement this interface. -type MutatingWebhookConfigurationsGetter interface { - MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface -} - -// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. 
-type MutatingWebhookConfigurationInterface interface { - Create(*admissionregistration.MutatingWebhookConfiguration) (*admissionregistration.MutatingWebhookConfiguration, error) - Update(*admissionregistration.MutatingWebhookConfiguration) (*admissionregistration.MutatingWebhookConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*admissionregistration.MutatingWebhookConfiguration, error) - List(opts v1.ListOptions) (*admissionregistration.MutatingWebhookConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.MutatingWebhookConfiguration, err error) - MutatingWebhookConfigurationExpansion -} - -// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface -type mutatingWebhookConfigurations struct { - client rest.Interface -} - -// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations -func newMutatingWebhookConfigurations(c *AdmissionregistrationClient) *mutatingWebhookConfigurations { - return &mutatingWebhookConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *admissionregistration.MutatingWebhookConfiguration, err error) { - result = &admissionregistration.MutatingWebhookConfiguration{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *admissionregistration.MutatingWebhookConfigurationList, err error) { - result = &admissionregistration.MutatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *admissionregistration.MutatingWebhookConfiguration) (result *admissionregistration.MutatingWebhookConfiguration, err error) { - result = &admissionregistration.MutatingWebhookConfiguration{} - err = c.client.Post(). - Resource("mutatingwebhookconfigurations"). - Body(mutatingWebhookConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
-func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *admissionregistration.MutatingWebhookConfiguration) (result *admissionregistration.MutatingWebhookConfiguration, err error) { - result = &admissionregistration.MutatingWebhookConfiguration{} - err = c.client.Put(). - Resource("mutatingwebhookconfigurations"). - Name(mutatingWebhookConfiguration.Name). - Body(mutatingWebhookConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.MutatingWebhookConfiguration, err error) { - result = &admissionregistration.MutatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("mutatingwebhookconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go deleted file mode 100644 index 980d9b5b8a9fb..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. -// A group's client should implement this interface. 
-type ValidatingWebhookConfigurationsGetter interface { - ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface -} - -// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. -type ValidatingWebhookConfigurationInterface interface { - Create(*admissionregistration.ValidatingWebhookConfiguration) (*admissionregistration.ValidatingWebhookConfiguration, error) - Update(*admissionregistration.ValidatingWebhookConfiguration) (*admissionregistration.ValidatingWebhookConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*admissionregistration.ValidatingWebhookConfiguration, error) - List(opts v1.ListOptions) (*admissionregistration.ValidatingWebhookConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.ValidatingWebhookConfiguration, err error) - ValidatingWebhookConfigurationExpansion -} - -// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface -type validatingWebhookConfigurations struct { - client rest.Interface -} - -// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations -func newValidatingWebhookConfigurations(c *AdmissionregistrationClient) *validatingWebhookConfigurations { - return &validatingWebhookConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *admissionregistration.ValidatingWebhookConfiguration, err error) { - result = &admissionregistration.ValidatingWebhookConfiguration{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *admissionregistration.ValidatingWebhookConfigurationList, err error) { - result = &admissionregistration.ValidatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *admissionregistration.ValidatingWebhookConfiguration) (result *admissionregistration.ValidatingWebhookConfiguration, err error) { - result = &admissionregistration.ValidatingWebhookConfiguration{} - err = c.client.Post(). - Resource("validatingwebhookconfigurations"). 
- Body(validatingWebhookConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *admissionregistration.ValidatingWebhookConfiguration) (result *admissionregistration.ValidatingWebhookConfiguration, err error) { - result = &admissionregistration.ValidatingWebhookConfiguration{} - err = c.client.Put(). - Resource("validatingwebhookconfigurations"). - Name(validatingWebhookConfiguration.Name). - Body(validatingWebhookConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.ValidatingWebhookConfiguration, err error) { - result = &admissionregistration.ValidatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("validatingwebhookconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go deleted file mode 100644 index abcdc639fa86e..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type AppsInterface interface { - RESTClient() rest.Interface - ControllerRevisionsGetter - StatefulSetsGetter -} - -// AppsClient is used to interact with features provided by the apps group. 
-type AppsClient struct { - restClient rest.Interface -} - -func (c *AppsClient) ControllerRevisions(namespace string) ControllerRevisionInterface { - return newControllerRevisions(c, namespace) -} - -func (c *AppsClient) StatefulSets(namespace string) StatefulSetInterface { - return newStatefulSets(c, namespace) -} - -// NewForConfig creates a new AppsClient for the given config. -func NewForConfig(c *rest.Config) (*AppsClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AppsClient{client}, nil -} - -// NewForConfigOrDie creates a new AppsClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AppsClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AppsClient for the given RESTClient. -func New(c rest.Interface) *AppsClient { - return &AppsClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("apps")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("apps")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AppsClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go deleted file mode 100644 index 33a28c3a327dc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - apps "k8s.io/kubernetes/pkg/apis/apps" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. -// A group's client should implement this interface. 
-type ControllerRevisionsGetter interface { - ControllerRevisions(namespace string) ControllerRevisionInterface -} - -// ControllerRevisionInterface has methods to work with ControllerRevision resources. -type ControllerRevisionInterface interface { - Create(*apps.ControllerRevision) (*apps.ControllerRevision, error) - Update(*apps.ControllerRevision) (*apps.ControllerRevision, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*apps.ControllerRevision, error) - List(opts v1.ListOptions) (*apps.ControllerRevisionList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.ControllerRevision, err error) - ControllerRevisionExpansion -} - -// controllerRevisions implements ControllerRevisionInterface -type controllerRevisions struct { - client rest.Interface - ns string -} - -// newControllerRevisions returns a ControllerRevisions -func newControllerRevisions(c *AppsClient, namespace string) *controllerRevisions { - return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *apps.ControllerRevision, err error) { - result = &apps.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(opts v1.ListOptions) (result *apps.ControllerRevisionList, err error) { - result = &apps.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(controllerRevision *apps.ControllerRevision) (result *apps.ControllerRevision, err error) { - result = &apps.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - Body(controllerRevision). - Do(). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(controllerRevision *apps.ControllerRevision) (result *apps.ControllerRevision, err error) { - result = &apps.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - Body(controllerRevision). - Do(). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. 
Returns an error if one occurs. -func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.ControllerRevision, err error) { - result = &apps.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go deleted file mode 100644 index 9bfab85765022..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -type ControllerRevisionExpansion interface{} - -type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go deleted file mode 100644 index 4d1e5fa16fde9..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go +++ /dev/null @@ -1,206 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - apps "k8s.io/kubernetes/pkg/apis/apps" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// StatefulSetsGetter has a method to return a StatefulSetInterface. -// A group's client should implement this interface. -type StatefulSetsGetter interface { - StatefulSets(namespace string) StatefulSetInterface -} - -// StatefulSetInterface has methods to work with StatefulSet resources. -type StatefulSetInterface interface { - Create(*apps.StatefulSet) (*apps.StatefulSet, error) - Update(*apps.StatefulSet) (*apps.StatefulSet, error) - UpdateStatus(*apps.StatefulSet) (*apps.StatefulSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*apps.StatefulSet, error) - List(opts v1.ListOptions) (*apps.StatefulSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.StatefulSet, err error) - GetScale(statefulSetName string, options v1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(statefulSetName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) - - StatefulSetExpansion -} - -// statefulSets implements StatefulSetInterface -type statefulSets struct { - client rest.Interface - ns string -} - -// newStatefulSets returns a StatefulSets -func newStatefulSets(c *AppsClient, namespace string) *statefulSets { - return &statefulSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(name string, options v1.GetOptions) (result *apps.StatefulSet, err error) { - result = &apps.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(opts v1.ListOptions) (result *apps.StatefulSetList, err error) { - result = &apps.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { - result = &apps.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - Body(statefulSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. 
-func (c *statefulSets) Update(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { - result = &apps.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - Body(statefulSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *statefulSets) UpdateStatus(statefulSet *apps.StatefulSet) (result *apps.StatefulSet, err error) { - result = &apps.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - Body(statefulSet). - Do(). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.StatefulSet, err error) { - result = &apps.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the statefulSet, and returns the corresponding autoscaling.Scale object, and an error if there is any. -func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSetName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSetName). - SubResource("scale"). - Body(scale). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD.bazel deleted file mode 100644 index a8998f19ad17c..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authentication_client.go", - "doc.go", - "generated_expansion.go", - "tokenreview.go", - "tokenreview_expansion.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/authentication:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/authentication_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/authentication_client.go deleted file mode 100644 index 086b38b4624a7..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/authentication_client.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type AuthenticationInterface interface { - RESTClient() rest.Interface - TokenReviewsGetter -} - -// AuthenticationClient is used to interact with features provided by the authentication.k8s.io group. -type AuthenticationClient struct { - restClient rest.Interface -} - -func (c *AuthenticationClient) TokenReviews() TokenReviewInterface { - return newTokenReviews(c) -} - -// NewForConfig creates a new AuthenticationClient for the given config. -func NewForConfig(c *rest.Config) (*AuthenticationClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AuthenticationClient{client}, nil -} - -// NewForConfigOrDie creates a new AuthenticationClient for the given config and -// panics if there is an error in the config. 
-func NewForConfigOrDie(c *rest.Config) *AuthenticationClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AuthenticationClient for the given RESTClient. -func New(c rest.Interface) *AuthenticationClient { - return &AuthenticationClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("authentication.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("authentication.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AuthenticationClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview.go deleted file mode 100644 index 04939f148cc66..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - rest "k8s.io/client-go/rest" -) - -// TokenReviewsGetter has a method to return a TokenReviewInterface. -// A group's client should implement this interface. -type TokenReviewsGetter interface { - TokenReviews() TokenReviewInterface -} - -// TokenReviewInterface has methods to work with TokenReview resources. -type TokenReviewInterface interface { - TokenReviewExpansion -} - -// tokenReviews implements TokenReviewInterface -type tokenReviews struct { - client rest.Interface -} - -// newTokenReviews returns a TokenReviews -func newTokenReviews(c *AuthenticationClient) *tokenReviews { - return &tokenReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview_expansion.go deleted file mode 100644 index 57bdc3ccda5fc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - authenticationapi "k8s.io/kubernetes/pkg/apis/authentication" -) - -type TokenReviewExpansion interface { - Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) -} - -func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - result = &authenticationapi.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - Body(tokenReview). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD.bazel deleted file mode 100644 index 589bfc4a62931..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authorization_client.go", - "doc.go", - "generated_expansion.go", - "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", - "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", - "selfsubjectrulesreview.go", - "selfsubjectrulesreview_expansion.go", - "subjectaccessreview.go", - "subjectaccessreview_expansion.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/authorization:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/authorization_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/authorization_client.go deleted file mode 100644 index f97f54644bced..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/authorization_client.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type AuthorizationInterface interface { - RESTClient() rest.Interface - LocalSubjectAccessReviewsGetter - SelfSubjectAccessReviewsGetter - SelfSubjectRulesReviewsGetter - SubjectAccessReviewsGetter -} - -// AuthorizationClient is used to interact with features provided by the authorization.k8s.io group. 
-type AuthorizationClient struct { - restClient rest.Interface -} - -func (c *AuthorizationClient) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { - return newLocalSubjectAccessReviews(c, namespace) -} - -func (c *AuthorizationClient) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { - return newSelfSubjectAccessReviews(c) -} - -func (c *AuthorizationClient) SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface { - return newSelfSubjectRulesReviews(c) -} - -func (c *AuthorizationClient) SubjectAccessReviews() SubjectAccessReviewInterface { - return newSubjectAccessReviews(c) -} - -// NewForConfig creates a new AuthorizationClient for the given config. -func NewForConfig(c *rest.Config) (*AuthorizationClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AuthorizationClient{client}, nil -} - -// NewForConfigOrDie creates a new AuthorizationClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AuthorizationClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AuthorizationClient for the given RESTClient. -func New(c rest.Interface) *AuthorizationClient { - return &AuthorizationClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("authorization.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("authorization.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AuthorizationClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
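Note on the deleted authorization_client.go above: it follows the standard client-gen construction pattern, where NewForConfig copies the rest.Config, applies setConfigDefaults (APIPath, preferred GroupVersion, codecs, QPS/Burst), and wraps the resulting RESTClient. A minimal usage sketch of that pattern, assuming a kubeconfig path; the internal clientset shown here is only importable from inside the Kubernetes source tree, so treat this purely as an illustration of the construction flow, not as the PR's code:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	authorizationinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion"
)

func main() {
	// The kubeconfig path is illustrative; any source of *rest.Config works the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// NewForConfig applies the defaults from setConfigDefaults before building
	// the REST client; NewForConfigOrDie is the panicking variant.
	client, err := authorizationinternal.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("authorization client ready:", client.RESTClient() != nil)
}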
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview.go deleted file mode 100644 index b98f8b0a70c07..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" -) - -// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. -// A group's client should implement this interface. -type LocalSubjectAccessReviewsGetter interface { - LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface -} - -// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. -type LocalSubjectAccessReviewInterface interface { - LocalSubjectAccessReviewExpansion -} - -// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface -type localSubjectAccessReviews struct { - client rest.Interface - ns string -} - -// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews -func newLocalSubjectAccessReviews(c *AuthorizationClient, namespace string) *localSubjectAccessReviews { - return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview_expansion.go deleted file mode 100644 index b49970bfeac00..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview_expansion.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import ( - authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" -) - -type LocalSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) -} - -func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - result = &authorizationapi.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview.go deleted file mode 100644 index 978ca20c116da..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" -) - -// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. -// A group's client should implement this interface. -type SelfSubjectAccessReviewsGetter interface { - SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface -} - -// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. -type SelfSubjectAccessReviewInterface interface { - SelfSubjectAccessReviewExpansion -} - -// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface -type selfSubjectAccessReviews struct { - client rest.Interface -} - -// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews -func newSelfSubjectAccessReviews(c *AuthorizationClient) *selfSubjectAccessReviews { - return &selfSubjectAccessReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview_expansion.go deleted file mode 100644 index fcfe9e97b5bb3..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" -) - -type SelfSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) -} - -func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - result = &authorizationapi.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectrulesreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectrulesreview.go deleted file mode 100644 index 1a1d5701bbeb7..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectrulesreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" -) - -// SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. -// A group's client should implement this interface. -type SelfSubjectRulesReviewsGetter interface { - SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface -} - -// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. 
-type SelfSubjectRulesReviewInterface interface { - SelfSubjectRulesReviewExpansion -} - -// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface -type selfSubjectRulesReviews struct { - client rest.Interface -} - -// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews -func newSelfSubjectRulesReviews(c *AuthorizationClient) *selfSubjectRulesReviews { - return &selfSubjectRulesReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectrulesreview_expansion.go deleted file mode 100644 index 5f8f754a68f7a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectrulesreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" -) - -type SelfSubjectRulesReviewExpansion interface { - Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) -} - -func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - result = &authorizationapi.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - Body(srr). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview.go deleted file mode 100644 index f442b59a1a6bd..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" -) - -// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. -// A group's client should implement this interface. 
-type SubjectAccessReviewsGetter interface { - SubjectAccessReviews() SubjectAccessReviewInterface -} - -// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. -type SubjectAccessReviewInterface interface { - SubjectAccessReviewExpansion -} - -// subjectAccessReviews implements SubjectAccessReviewInterface -type subjectAccessReviews struct { - client rest.Interface -} - -// newSubjectAccessReviews returns a SubjectAccessReviews -func newSubjectAccessReviews(c *AuthorizationClient) *subjectAccessReviews { - return &subjectAccessReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview_expansion.go deleted file mode 100644 index 44d672d604a1f..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" -) - -type SubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) -} - -func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - result = &authorizationapi.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - Body(sar). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD.bazel deleted file mode 100644 index ca68b45ab6427..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - "horizontalpodautoscaler.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/autoscaling_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/autoscaling_client.go deleted file mode 100644 index e00bc91913aa2..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/autoscaling_client.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type AutoscalingInterface interface { - RESTClient() rest.Interface - HorizontalPodAutoscalersGetter -} - -// AutoscalingClient is used to interact with features provided by the autoscaling group. -type AutoscalingClient struct { - restClient rest.Interface -} - -func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { - return newHorizontalPodAutoscalers(c, namespace) -} - -// NewForConfig creates a new AutoscalingClient for the given config. 
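The review resources whose deletions end just above (TokenReview, LocalSubjectAccessReview, SelfSubjectAccessReview, SelfSubjectRulesReview, SubjectAccessReview) are create-only: the generated interfaces carry no CRUD methods, and the hand-written *_expansion.go files add a single Create that POSTs to the resource. A sketch of how such a check is typically issued against the expansion interface deleted above; the user, verb, and resource values are assumptions:

package example

import (
	"fmt"

	authorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
	authorizationinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion"
)

// canGetPods asks the API server whether the named user may get pods in the
// given namespace, using the create-only SubjectAccessReview expansion.
func canGetPods(client authorizationinternal.AuthorizationInterface, user, namespace string) (bool, error) {
	sar := &authorizationapi.SubjectAccessReview{
		Spec: authorizationapi.SubjectAccessReviewSpec{
			User: user,
			ResourceAttributes: &authorizationapi.ResourceAttributes{
				Namespace: namespace,
				Verb:      "get",
				Resource:  "pods",
			},
		},
	}
	// Create is the only verb; the server fills in Status on the response.
	result, err := client.SubjectAccessReviews().Create(sar)
	if err != nil {
		return false, err
	}
	fmt.Printf("allowed=%v reason=%q\n", result.Status.Allowed, result.Status.Reason)
	return result.Status.Allowed, nil
}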
-func NewForConfig(c *rest.Config) (*AutoscalingClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AutoscalingClient{client}, nil -} - -// NewForConfigOrDie creates a new AutoscalingClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AutoscalingClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AutoscalingClient for the given RESTClient. -func New(c rest.Interface) *AutoscalingClient { - return &AutoscalingClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("autoscaling")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("autoscaling")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AutoscalingClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go deleted file mode 100644 index 39ec41254f810..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. -// A group's client should implement this interface. -type HorizontalPodAutoscalersGetter interface { - HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface -} - -// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. -type HorizontalPodAutoscalerInterface interface { - Create(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - Update(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - UpdateStatus(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*autoscaling.HorizontalPodAutoscaler, error) - List(opts v1.ListOptions) (*autoscaling.HorizontalPodAutoscalerList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *autoscaling.HorizontalPodAutoscaler, err error) - HorizontalPodAutoscalerExpansion -} - -// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type horizontalPodAutoscalers struct { - client rest.Interface - ns string -} - -// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers -func newHorizontalPodAutoscalers(c *AutoscalingClient, namespace string) *horizontalPodAutoscalers { - return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { - result = &autoscaling.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
-func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD.bazel deleted file mode 100644 index dc3c95ebe8d66..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "cronjob.go", - "doc.go", - "generated_expansion.go", - "job.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go deleted file mode 100644 index 72cb6cffa7188..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - batch "k8s.io/kubernetes/pkg/apis/batch" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// CronJobsGetter has a method to return a CronJobInterface. -// A group's client should implement this interface. -type CronJobsGetter interface { - CronJobs(namespace string) CronJobInterface -} - -// CronJobInterface has methods to work with CronJob resources. 
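The HorizontalPodAutoscaler client deleted above shows the full method set client-gen emitted at this vintage: context-free signatures taking metav1 option structs, with each request built through the rest.Request chain (Namespace, Resource, Name, VersionedParams, Do, Into). A sketch of typical calls against that interface; the namespace, names, and label selector are assumptions:

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
	autoscalinginternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion"
)

// bumpMaxReplicas reads one HPA, raises MaxReplicas, and writes it back,
// using the pre-context signatures shown in the deleted file.
func bumpMaxReplicas(client autoscalinginternal.AutoscalingInterface, namespace, name string, max int32) (*autoscaling.HorizontalPodAutoscaler, error) {
	hpas := client.HorizontalPodAutoscalers(namespace)

	hpa, err := hpas.Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	hpa.Spec.MaxReplicas = max
	return hpas.Update(hpa)
}

// watchHPAs lists matching autoscalers once, then follows changes until the
// watch channel closes.
func watchHPAs(client autoscalinginternal.AutoscalingInterface, namespace string) error {
	hpas := client.HorizontalPodAutoscalers(namespace)

	list, err := hpas.List(metav1.ListOptions{LabelSelector: "app=web"}) // selector is illustrative
	if err != nil {
		return err
	}
	fmt.Println("existing autoscalers:", len(list.Items))

	w, err := hpas.Watch(metav1.ListOptions{ResourceVersion: list.ResourceVersion})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		fmt.Println("event:", event.Type)
	}
	return nil
}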
-type CronJobInterface interface { - Create(*batch.CronJob) (*batch.CronJob, error) - Update(*batch.CronJob) (*batch.CronJob, error) - UpdateStatus(*batch.CronJob) (*batch.CronJob, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*batch.CronJob, error) - List(opts v1.ListOptions) (*batch.CronJobList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *batch.CronJob, err error) - CronJobExpansion -} - -// cronJobs implements CronJobInterface -type cronJobs struct { - client rest.Interface - ns string -} - -// newCronJobs returns a CronJobs -func newCronJobs(c *BatchClient, namespace string) *cronJobs { - return &cronJobs{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(name string, options v1.GetOptions) (result *batch.CronJob, err error) { - result = &batch.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(opts v1.ListOptions) (result *batch.CronJobList, err error) { - result = &batch.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(cronJob *batch.CronJob) (result *batch.CronJob, err error) { - result = &batch.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - Body(cronJob). - Do(). - Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(cronJob *batch.CronJob) (result *batch.CronJob, err error) { - result = &batch.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - Body(cronJob). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *cronJobs) UpdateStatus(cronJob *batch.CronJob) (result *batch.CronJob, err error) { - result = &batch.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - Body(cronJob). - Do(). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(options). - Do(). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *batch.CronJob, err error) { - result = &batch.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go deleted file mode 100644 index e9f9dc545ca8c..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - batch "k8s.io/kubernetes/pkg/apis/batch" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// JobsGetter has a method to return a JobInterface. -// A group's client should implement this interface. -type JobsGetter interface { - Jobs(namespace string) JobInterface -} - -// JobInterface has methods to work with Job resources. 
-type JobInterface interface { - Create(*batch.Job) (*batch.Job, error) - Update(*batch.Job) (*batch.Job, error) - UpdateStatus(*batch.Job) (*batch.Job, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*batch.Job, error) - List(opts v1.ListOptions) (*batch.JobList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *batch.Job, err error) - JobExpansion -} - -// jobs implements JobInterface -type jobs struct { - client rest.Interface - ns string -} - -// newJobs returns a Jobs -func newJobs(c *BatchClient, namespace string) *jobs { - return &jobs{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(name string, options v1.GetOptions) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(opts v1.ListOptions) (result *batch.JobList, err error) { - result = &batch.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Post(). - Namespace(c.ns). - Resource("jobs"). - Body(job). - Do(). - Into(result) - return -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - Body(job). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - SubResource("status"). - Body(job). - Do(). - Into(result) - return -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&listOptions, scheme.ParameterCodec). 
- Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched job. -func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("jobs"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD.bazel deleted file mode 100644 index 44702661fa244..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "certificates_client.go", - "certificatesigningrequest.go", - "certificatesigningrequest_expansion.go", - "doc.go", - "generated_expansion.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/certificates:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificates_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificates_client.go deleted file mode 100644 index 852a42975298a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificates_client.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type CertificatesInterface interface { - RESTClient() rest.Interface - CertificateSigningRequestsGetter -} - -// CertificatesClient is used to interact with features provided by the certificates.k8s.io group. 
-type CertificatesClient struct { - restClient rest.Interface -} - -func (c *CertificatesClient) CertificateSigningRequests() CertificateSigningRequestInterface { - return newCertificateSigningRequests(c) -} - -// NewForConfig creates a new CertificatesClient for the given config. -func NewForConfig(c *rest.Config) (*CertificatesClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CertificatesClient{client}, nil -} - -// NewForConfigOrDie creates a new CertificatesClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CertificatesClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CertificatesClient for the given RESTClient. -func New(c rest.Interface) *CertificatesClient { - return &CertificatesClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("certificates.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("certificates.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CertificatesClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go deleted file mode 100644 index 8a5b7b380779f..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - certificates "k8s.io/kubernetes/pkg/apis/certificates" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. -// A group's client should implement this interface. 
-type CertificateSigningRequestsGetter interface { - CertificateSigningRequests() CertificateSigningRequestInterface -} - -// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. -type CertificateSigningRequestInterface interface { - Create(*certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) - Update(*certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) - UpdateStatus(*certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*certificates.CertificateSigningRequest, error) - List(opts v1.ListOptions) (*certificates.CertificateSigningRequestList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *certificates.CertificateSigningRequest, err error) - CertificateSigningRequestExpansion -} - -// certificateSigningRequests implements CertificateSigningRequestInterface -type certificateSigningRequests struct { - client rest.Interface -} - -// newCertificateSigningRequests returns a CertificateSigningRequests -func newCertificateSigningRequests(c *CertificatesClient) *certificateSigningRequests { - return &certificateSigningRequests{ - client: c.RESTClient(), - } -} - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *certificates.CertificateSigningRequestList, err error) { - result = &certificates.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Create(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). - Body(certificateSigningRequest). - Do(). - Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. 
-func (c *certificateSigningRequests) Update(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - Body(certificateSigningRequest). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - Body(certificateSigningRequest). - Do(). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest_expansion.go deleted file mode 100644 index ffdde2056055a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import "k8s.io/kubernetes/pkg/apis/certificates" - -type CertificateSigningRequestExpansion interface { - UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) -} - -func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - Body(certificateSigningRequest). - SubResource("approval"). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/BUILD.bazel deleted file mode 100644 index 273baf6a35f03..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "coordination_client.go", - "doc.go", - "generated_expansion.go", - "lease.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/coordination:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/coordination_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/coordination_client.go deleted file mode 100644 index af5307b7602a4..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/coordination_client.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type CoordinationInterface interface { - RESTClient() rest.Interface - LeasesGetter -} - -// CoordinationClient is used to interact with features provided by the coordination.k8s.io group. -type CoordinationClient struct { - restClient rest.Interface -} - -func (c *CoordinationClient) Leases(namespace string) LeaseInterface { - return newLeases(c, namespace) -} - -// NewForConfig creates a new CoordinationClient for the given config. 
-func NewForConfig(c *rest.Config) (*CoordinationClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoordinationClient{client}, nil -} - -// NewForConfigOrDie creates a new CoordinationClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CoordinationClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoordinationClient for the given RESTClient. -func New(c rest.Interface) *CoordinationClient { - return &CoordinationClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("coordination.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("coordination.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CoordinationClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/generated_expansion.go deleted file mode 100644 index 852379fbd9fe7..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -type LeaseExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/lease.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/lease.go deleted file mode 100644 index a2023c38b9073..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/lease.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - coordination "k8s.io/kubernetes/pkg/apis/coordination" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// LeasesGetter has a method to return a LeaseInterface. -// A group's client should implement this interface. -type LeasesGetter interface { - Leases(namespace string) LeaseInterface -} - -// LeaseInterface has methods to work with Lease resources. -type LeaseInterface interface { - Create(*coordination.Lease) (*coordination.Lease, error) - Update(*coordination.Lease) (*coordination.Lease, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*coordination.Lease, error) - List(opts v1.ListOptions) (*coordination.LeaseList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *coordination.Lease, err error) - LeaseExpansion -} - -// leases implements LeaseInterface -type leases struct { - client rest.Interface - ns string -} - -// newLeases returns a Leases -func newLeases(c *CoordinationClient, namespace string) *leases { - return &leases{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(name string, options v1.GetOptions) (result *coordination.Lease, err error) { - result = &coordination.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(opts v1.ListOptions) (result *coordination.LeaseList, err error) { - result = &coordination.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(lease *coordination.Lease) (result *coordination.Lease, err error) { - result = &coordination.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - Body(lease). - Do(). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(lease *coordination.Lease) (result *coordination.Lease, err error) { - result = &coordination.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - Body(lease). - Do(). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *coordination.Lease, err error) { - result = &coordination.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD.bazel deleted file mode 100644 index d67c2babbdf52..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD.bazel +++ /dev/null @@ -1,50 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "componentstatus.go", - "configmap.go", - "core_client.go", - "doc.go", - "endpoints.go", - "event.go", - "event_expansion.go", - "generated_expansion.go", - "limitrange.go", - "namespace.go", - "namespace_expansion.go", - "node.go", - "node_expansion.go", - "persistentvolume.go", - "persistentvolumeclaim.go", - "pod.go", - "pod_expansion.go", - "podtemplate.go", - "replicationcontroller.go", - "resourcequota.go", - "secret.go", - "service.go", - "service_expansion.go", - "serviceaccount.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/ref:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core/v1:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go deleted file mode 100644 index 07b75daab4c96..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ComponentStatusesGetter has a method to return a ComponentStatusInterface. -// A group's client should implement this interface. -type ComponentStatusesGetter interface { - ComponentStatuses() ComponentStatusInterface -} - -// ComponentStatusInterface has methods to work with ComponentStatus resources. -type ComponentStatusInterface interface { - Create(*core.ComponentStatus) (*core.ComponentStatus, error) - Update(*core.ComponentStatus) (*core.ComponentStatus, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.ComponentStatus, error) - List(opts v1.ListOptions) (*core.ComponentStatusList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ComponentStatus, err error) - ComponentStatusExpansion -} - -// componentStatuses implements ComponentStatusInterface -type componentStatuses struct { - client rest.Interface -} - -// newComponentStatuses returns a ComponentStatuses -func newComponentStatuses(c *CoreClient) *componentStatuses { - return &componentStatuses{ - client: c.RESTClient(), - } -} - -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *componentStatuses) Get(name string, options v1.GetOptions) (result *core.ComponentStatus, err error) { - result = &core.ComponentStatus{} - err = c.client.Get(). - Resource("componentstatuses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *componentStatuses) List(opts v1.ListOptions) (result *core.ComponentStatusList, err error) { - result = &core.ComponentStatusList{} - err = c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *componentStatuses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(componentStatus *core.ComponentStatus) (result *core.ComponentStatus, err error) { - result = &core.ComponentStatus{} - err = c.client.Post(). - Resource("componentstatuses"). - Body(componentStatus). - Do(). - Into(result) - return -} - -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Update(componentStatus *core.ComponentStatus) (result *core.ComponentStatus, err error) { - result = &core.ComponentStatus{} - err = c.client.Put(). - Resource("componentstatuses"). 
- Name(componentStatus.Name). - Body(componentStatus). - Do(). - Into(result) - return -} - -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *componentStatuses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *componentStatuses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched componentStatus. -func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ComponentStatus, err error) { - result = &core.ComponentStatus{} - err = c.client.Patch(pt). - Resource("componentstatuses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go deleted file mode 100644 index af1fc1fa17c24..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ConfigMapsGetter has a method to return a ConfigMapInterface. -// A group's client should implement this interface. -type ConfigMapsGetter interface { - ConfigMaps(namespace string) ConfigMapInterface -} - -// ConfigMapInterface has methods to work with ConfigMap resources. 
-type ConfigMapInterface interface { - Create(*core.ConfigMap) (*core.ConfigMap, error) - Update(*core.ConfigMap) (*core.ConfigMap, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.ConfigMap, error) - List(opts v1.ListOptions) (*core.ConfigMapList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ConfigMap, err error) - ConfigMapExpansion -} - -// configMaps implements ConfigMapInterface -type configMaps struct { - client rest.Interface - ns string -} - -// newConfigMaps returns a ConfigMaps -func newConfigMaps(c *CoreClient, namespace string) *configMaps { - return &configMaps{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(name string, options v1.GetOptions) (result *core.ConfigMap, err error) { - result = &core.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(opts v1.ListOptions) (result *core.ConfigMapList, err error) { - result = &core.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(configMap *core.ConfigMap) (result *core.ConfigMap, err error) { - result = &core.ConfigMap{} - err = c.client.Post(). - Namespace(c.ns). - Resource("configmaps"). - Body(configMap). - Do(). - Into(result) - return -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(configMap *core.ConfigMap) (result *core.ConfigMap, err error) { - result = &core.ConfigMap{} - err = c.client.Put(). - Namespace(c.ns). - Resource("configmaps"). - Name(configMap.Name). - Body(configMap). - Do(). - Into(result) - return -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched configMap. 
-func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ConfigMap, err error) { - result = &core.ConfigMap{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("configmaps"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/core_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/core_client.go deleted file mode 100644 index 6ba341b91b8ad..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/core_client.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type CoreInterface interface { - RESTClient() rest.Interface - ComponentStatusesGetter - ConfigMapsGetter - EndpointsGetter - EventsGetter - LimitRangesGetter - NamespacesGetter - NodesGetter - PersistentVolumesGetter - PersistentVolumeClaimsGetter - PodsGetter - PodTemplatesGetter - ReplicationControllersGetter - ResourceQuotasGetter - SecretsGetter - ServicesGetter - ServiceAccountsGetter -} - -// CoreClient is used to interact with features provided by the group. 
-type CoreClient struct { - restClient rest.Interface -} - -func (c *CoreClient) ComponentStatuses() ComponentStatusInterface { - return newComponentStatuses(c) -} - -func (c *CoreClient) ConfigMaps(namespace string) ConfigMapInterface { - return newConfigMaps(c, namespace) -} - -func (c *CoreClient) Endpoints(namespace string) EndpointsInterface { - return newEndpoints(c, namespace) -} - -func (c *CoreClient) Events(namespace string) EventInterface { - return newEvents(c, namespace) -} - -func (c *CoreClient) LimitRanges(namespace string) LimitRangeInterface { - return newLimitRanges(c, namespace) -} - -func (c *CoreClient) Namespaces() NamespaceInterface { - return newNamespaces(c) -} - -func (c *CoreClient) Nodes() NodeInterface { - return newNodes(c) -} - -func (c *CoreClient) PersistentVolumes() PersistentVolumeInterface { - return newPersistentVolumes(c) -} - -func (c *CoreClient) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { - return newPersistentVolumeClaims(c, namespace) -} - -func (c *CoreClient) Pods(namespace string) PodInterface { - return newPods(c, namespace) -} - -func (c *CoreClient) PodTemplates(namespace string) PodTemplateInterface { - return newPodTemplates(c, namespace) -} - -func (c *CoreClient) ReplicationControllers(namespace string) ReplicationControllerInterface { - return newReplicationControllers(c, namespace) -} - -func (c *CoreClient) ResourceQuotas(namespace string) ResourceQuotaInterface { - return newResourceQuotas(c, namespace) -} - -func (c *CoreClient) Secrets(namespace string) SecretInterface { - return newSecrets(c, namespace) -} - -func (c *CoreClient) Services(namespace string) ServiceInterface { - return newServices(c, namespace) -} - -func (c *CoreClient) ServiceAccounts(namespace string) ServiceAccountInterface { - return newServiceAccounts(c, namespace) -} - -// NewForConfig creates a new CoreClient for the given config. -func NewForConfig(c *rest.Config) (*CoreClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoreClient{client}, nil -} - -// NewForConfigOrDie creates a new CoreClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CoreClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoreClient for the given RESTClient. -func New(c rest.Interface) *CoreClient { - return &CoreClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/api" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *CoreClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go deleted file mode 100644 index acb3fd21e57fd..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// EndpointsGetter has a method to return a EndpointsInterface. -// A group's client should implement this interface. -type EndpointsGetter interface { - Endpoints(namespace string) EndpointsInterface -} - -// EndpointsInterface has methods to work with Endpoints resources. 
-type EndpointsInterface interface { - Create(*core.Endpoints) (*core.Endpoints, error) - Update(*core.Endpoints) (*core.Endpoints, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.Endpoints, error) - List(opts v1.ListOptions) (*core.EndpointsList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Endpoints, err error) - EndpointsExpansion -} - -// endpoints implements EndpointsInterface -type endpoints struct { - client rest.Interface - ns string -} - -// newEndpoints returns a Endpoints -func newEndpoints(c *CoreClient, namespace string) *endpoints { - return &endpoints{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(name string, options v1.GetOptions) (result *core.Endpoints, err error) { - result = &core.Endpoints{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *endpoints) List(opts v1.ListOptions) (result *core.EndpointsList, err error) { - result = &core.EndpointsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpoints. -func (c *endpoints) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Create(endpoints *core.Endpoints) (result *core.Endpoints, err error) { - result = &core.Endpoints{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpoints"). - Body(endpoints). - Do(). - Into(result) - return -} - -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(endpoints *core.Endpoints) (result *core.Endpoints, err error) { - result = &core.Endpoints{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpoints"). - Name(endpoints.Name). - Body(endpoints). - Do(). - Into(result) - return -} - -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *endpoints) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpoints) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched endpoints. 
-func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Endpoints, err error) { - result = &core.Endpoints{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpoints"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go deleted file mode 100644 index 16dbe65964215..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// EventsGetter has a method to return a EventInterface. -// A group's client should implement this interface. -type EventsGetter interface { - Events(namespace string) EventInterface -} - -// EventInterface has methods to work with Event resources. -type EventInterface interface { - Create(*core.Event) (*core.Event, error) - Update(*core.Event) (*core.Event, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.Event, error) - List(opts v1.ListOptions) (*core.EventList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Event, err error) - EventExpansion -} - -// events implements EventInterface -type events struct { - client rest.Interface - ns string -} - -// newEvents returns a Events -func newEvents(c *CoreClient, namespace string) *events { - return &events{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(name string, options v1.GetOptions) (result *core.Event, err error) { - result = &core.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(opts v1.ListOptions) (result *core.EventList, err error) { - result = &core.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(event *core.Event) (result *core.Event, err error) { - result = &core.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - Body(event). - Do(). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(event *core.Event) (result *core.Event, err error) { - result = &core.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Event, err error) { - result = &core.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go deleted file mode 100644 index 5f1ebb89cc9f6..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go +++ /dev/null @@ -1,199 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - "fmt" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api/ref" - api "k8s.io/kubernetes/pkg/apis/core" - k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" -) - -// The EventExpansion interface allows manually adding extra methods to the EventInterface. 
-type EventExpansion interface { - // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. - CreateWithEventNamespace(event *api.Event) (*api.Event, error) - // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. - UpdateWithEventNamespace(event *api.Event) (*api.Event, error) - PatchWithEventNamespace(event *api.Event, data []byte) (*api.Event, error) - // Search finds events about the specified object - Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*api.EventList, error) - // Returns the appropriate field selector based on the API version being used to communicate with the server. - // The returned field selector can be used with List and Watch to filter desired events. - GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector -} - -// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, -// or an error. The namespace to create the event within is deduced from the -// event; it must either match this event client's namespace, or this event -// client must have been created with the "" namespace. -func (e *events) CreateWithEventNamespace(event *api.Event) (*api.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) - } - result := &api.Event{} - err := e.client.Post(). - NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Body(event). - Do(). - Into(result) - return result, err -} - -// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns, -// or an error. The namespace and key to update the event within is deduced from the event. The -// namespace must either match this event client's namespace, or this event client must have been -// created with the "" namespace. Update also requires the ResourceVersion to be set in the event -// object. -func (e *events) UpdateWithEventNamespace(event *api.Event) (*api.Event, error) { - result := &api.Event{} - err := e.client.Put(). - NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return result, err -} - -// PatchWithEventNamespace modifies an existing event. It returns the copy of -// the event that the server returns, or an error. The namespace and name of the -// target event is deduced from the incompleteEvent. The namespace must either -// match this event client's namespace, or this event client must have been -// created with the "" namespace. -func (e *events) PatchWithEventNamespace(incompleteEvent *api.Event, data []byte) (*api.Event, error) { - if e.ns != "" && incompleteEvent.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) - } - result := &api.Event{} - err := e.client.Patch(types.StrategicMergePatchType). - NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). - Resource("events"). - Name(incompleteEvent.Name). - Body(data). - Do(). - Into(result) - return result, err -} - -// Search finds events about the specified object. The namespace of the -// object must match this event's client namespace unless the event client -// was made with the "" namespace. 
-func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*api.EventList, error) { - ref, err := ref.GetReference(scheme, objOrRef) - if err != nil { - return nil, err - } - if e.ns != "" && ref.Namespace != e.ns { - return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) - } - stringRefKind := string(ref.Kind) - var refKind *string - if stringRefKind != "" { - refKind = &stringRefKind - } - stringRefUID := string(ref.UID) - var refUID *string - if stringRefUID != "" { - refUID = &stringRefUID - } - fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) - return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) -} - -// Returns the appropriate field selector based on the API version being used to communicate with the server. -// The returned field selector can be used with List and Watch to filter desired events. -func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { - apiVersion := e.client.APIVersion().String() - field := fields.Set{} - if involvedObjectName != nil { - field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName - } - if involvedObjectNamespace != nil { - field["involvedObject.namespace"] = *involvedObjectNamespace - } - if involvedObjectKind != nil { - field["involvedObject.kind"] = *involvedObjectKind - } - if involvedObjectUID != nil { - field["involvedObject.uid"] = *involvedObjectUID - } - return field.AsSelector() -} - -// Returns the appropriate field label to use for name of the involved object as per the given API version. -func GetInvolvedObjectNameFieldLabel(version string) string { - return "involvedObject.name" -} - -// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. 
-type EventSinkImpl struct { - Interface EventInterface -} - -func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) { - internalEvent := &api.Event{} - err := k8s_api_v1.Convert_v1_Event_To_core_Event(event, internalEvent, nil) - if err != nil { - return nil, err - } - _, err = e.Interface.CreateWithEventNamespace(internalEvent) - if err != nil { - return nil, err - } - return event, nil -} - -func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) { - internalEvent := &api.Event{} - err := k8s_api_v1.Convert_v1_Event_To_core_Event(event, internalEvent, nil) - if err != nil { - return nil, err - } - _, err = e.Interface.UpdateWithEventNamespace(internalEvent) - if err != nil { - return nil, err - } - return event, nil -} - -func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) { - internalEvent := &api.Event{} - err := k8s_api_v1.Convert_v1_Event_To_core_Event(event, internalEvent, nil) - if err != nil { - return nil, err - } - internalEvent, err = e.Interface.PatchWithEventNamespace(internalEvent, data) - if err != nil { - return nil, err - } - externalEvent := &v1.Event{} - err = k8s_api_v1.Convert_core_Event_To_v1_Event(internalEvent, externalEvent, nil) - if err != nil { - // Patch succeeded, no need to report the failed conversion - return event, nil - } - return externalEvent, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/generated_expansion.go deleted file mode 100644 index 641c922606baa..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/generated_expansion.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -type ComponentStatusExpansion interface{} - -type ConfigMapExpansion interface{} - -type EndpointsExpansion interface{} - -type LimitRangeExpansion interface{} - -type PersistentVolumeExpansion interface{} - -type PersistentVolumeClaimExpansion interface{} - -type PodTemplateExpansion interface{} - -type ReplicationControllerExpansion interface{} - -type ResourceQuotaExpansion interface{} - -type SecretExpansion interface{} - -type ServiceAccountExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go deleted file mode 100644 index d16b9b5ae1aa8..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// LimitRangesGetter has a method to return a LimitRangeInterface. -// A group's client should implement this interface. -type LimitRangesGetter interface { - LimitRanges(namespace string) LimitRangeInterface -} - -// LimitRangeInterface has methods to work with LimitRange resources. -type LimitRangeInterface interface { - Create(*core.LimitRange) (*core.LimitRange, error) - Update(*core.LimitRange) (*core.LimitRange, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.LimitRange, error) - List(opts v1.ListOptions) (*core.LimitRangeList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.LimitRange, err error) - LimitRangeExpansion -} - -// limitRanges implements LimitRangeInterface -type limitRanges struct { - client rest.Interface - ns string -} - -// newLimitRanges returns a LimitRanges -func newLimitRanges(c *CoreClient, namespace string) *limitRanges { - return &limitRanges{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *limitRanges) Get(name string, options v1.GetOptions) (result *core.LimitRange, err error) { - result = &core.LimitRange{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(opts v1.ListOptions) (result *core.LimitRangeList, err error) { - result = &core.LimitRangeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *limitRanges) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Create(limitRange *core.LimitRange) (result *core.LimitRange, err error) { - result = &core.LimitRange{} - err = c.client.Post(). - Namespace(c.ns). 
- Resource("limitranges"). - Body(limitRange). - Do(). - Into(result) - return -} - -// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(limitRange *core.LimitRange) (result *core.LimitRange, err error) { - result = &core.LimitRange{} - err = c.client.Put(). - Namespace(c.ns). - Resource("limitranges"). - Name(limitRange.Name). - Body(limitRange). - Do(). - Into(result) - return -} - -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *limitRanges) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *limitRanges) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched limitRange. -func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.LimitRange, err error) { - result = &core.LimitRange{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("limitranges"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go deleted file mode 100644 index 5a0f413d35e3b..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// NamespacesGetter has a method to return a NamespaceInterface. -// A group's client should implement this interface. -type NamespacesGetter interface { - Namespaces() NamespaceInterface -} - -// NamespaceInterface has methods to work with Namespace resources. 
-type NamespaceInterface interface { - Create(*core.Namespace) (*core.Namespace, error) - Update(*core.Namespace) (*core.Namespace, error) - UpdateStatus(*core.Namespace) (*core.Namespace, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.Namespace, error) - List(opts v1.ListOptions) (*core.NamespaceList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Namespace, err error) - NamespaceExpansion -} - -// namespaces implements NamespaceInterface -type namespaces struct { - client rest.Interface -} - -// newNamespaces returns a Namespaces -func newNamespaces(c *CoreClient) *namespaces { - return &namespaces{ - client: c.RESTClient(), - } -} - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(name string, options v1.GetOptions) (result *core.Namespace, err error) { - result = &core.Namespace{} - err = c.client.Get(). - Resource("namespaces"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(opts v1.ListOptions) (result *core.NamespaceList, err error) { - result = &core.NamespaceList{} - err = c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(namespace *core.Namespace) (result *core.Namespace, err error) { - result = &core.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - Body(namespace). - Do(). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(namespace *core.Namespace) (result *core.Namespace, err error) { - result = &core.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - Body(namespace). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *namespaces) UpdateStatus(namespace *core.Namespace) (result *core.Namespace, err error) { - result = &core.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - Body(namespace). - Do(). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *namespaces) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("namespaces"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched namespace. -func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Namespace, err error) { - result = &core.Namespace{} - err = c.client.Patch(pt). - Resource("namespaces"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace_expansion.go deleted file mode 100644 index 29c7bcf2e24bb..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - api "k8s.io/kubernetes/pkg/apis/core" -) - -// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. -type NamespaceExpansion interface { - Finalize(item *api.Namespace) (*api.Namespace, error) -} - -// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. -func (c *namespaces) Finalize(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go deleted file mode 100644 index 515663c4d6922..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// NodesGetter has a method to return a NodeInterface. -// A group's client should implement this interface. -type NodesGetter interface { - Nodes() NodeInterface -} - -// NodeInterface has methods to work with Node resources. -type NodeInterface interface { - Create(*core.Node) (*core.Node, error) - Update(*core.Node) (*core.Node, error) - UpdateStatus(*core.Node) (*core.Node, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.Node, error) - List(opts v1.ListOptions) (*core.NodeList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Node, err error) - NodeExpansion -} - -// nodes implements NodeInterface -type nodes struct { - client rest.Interface -} - -// newNodes returns a Nodes -func newNodes(c *CoreClient) *nodes { - return &nodes{ - client: c.RESTClient(), - } -} - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(name string, options v1.GetOptions) (result *core.Node, err error) { - result = &core.Node{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *nodes) List(opts v1.ListOptions) (result *core.NodeList, err error) { - result = &core.NodeList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(node *core.Node) (result *core.Node, err error) { - result = &core.Node{} - err = c.client.Post(). - Resource("nodes"). - Body(node). - Do(). - Into(result) - return -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Update(node *core.Node) (result *core.Node, err error) { - result = &core.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - Body(node). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *nodes) UpdateStatus(node *core.Node) (result *core.Node, err error) { - result = &core.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - SubResource("status"). - Body(node). - Do(). - Into(result) - return -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. 
-func (c *nodes) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("nodes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("nodes"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched node. -func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Node, err error) { - result = &core.Node{} - err = c.client.Patch(pt). - Resource("nodes"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node_expansion.go deleted file mode 100644 index 8e29d5f1ef0b4..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node_expansion.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/apis/core" -) - -// The NodeExpansion interface allows manually adding extra methods to the NodeInterface. -type NodeExpansion interface { - // PatchStatus modifies the status of an existing node. It returns the copy - // of the node that the server returns, or an error. - PatchStatus(nodeName string, data []byte) (*api.Node, error) -} - -// PatchStatus modifies the status of an existing node. It returns the copy of -// the node that the server returns, or an error. -func (c *nodes) PatchStatus(nodeName string, data []byte) (*api.Node, error) { - result := &api.Node{} - err := c.client.Patch(types.StrategicMergePatchType). - Resource("nodes"). - Name(nodeName). - SubResource("status"). - Body(data). - Do(). - Into(result) - return result, err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go deleted file mode 100644 index fb717d8a3f6f4..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. -// A group's client should implement this interface. -type PersistentVolumesGetter interface { - PersistentVolumes() PersistentVolumeInterface -} - -// PersistentVolumeInterface has methods to work with PersistentVolume resources. -type PersistentVolumeInterface interface { - Create(*core.PersistentVolume) (*core.PersistentVolume, error) - Update(*core.PersistentVolume) (*core.PersistentVolume, error) - UpdateStatus(*core.PersistentVolume) (*core.PersistentVolume, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.PersistentVolume, error) - List(opts v1.ListOptions) (*core.PersistentVolumeList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PersistentVolume, err error) - PersistentVolumeExpansion -} - -// persistentVolumes implements PersistentVolumeInterface -type persistentVolumes struct { - client rest.Interface -} - -// newPersistentVolumes returns a PersistentVolumes -func newPersistentVolumes(c *CoreClient) *persistentVolumes { - return &persistentVolumes{ - client: c.RESTClient(), - } -} - -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *persistentVolumes) Get(name string, options v1.GetOptions) (result *core.PersistentVolume, err error) { - result = &core.PersistentVolume{} - err = c.client.Get(). - Resource("persistentvolumes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(opts v1.ListOptions) (result *core.PersistentVolumeList, err error) { - result = &core.PersistentVolumeList{} - err = c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *persistentVolumes) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. 
-func (c *persistentVolumes) Create(persistentVolume *core.PersistentVolume) (result *core.PersistentVolume, err error) { - result = &core.PersistentVolume{} - err = c.client.Post(). - Resource("persistentvolumes"). - Body(persistentVolume). - Do(). - Into(result) - return -} - -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Update(persistentVolume *core.PersistentVolume) (result *core.PersistentVolume, err error) { - result = &core.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - Body(persistentVolume). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *persistentVolumes) UpdateStatus(persistentVolume *core.PersistentVolume) (result *core.PersistentVolume, err error) { - result = &core.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - SubResource("status"). - Body(persistentVolume). - Do(). - Into(result) - return -} - -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *persistentVolumes) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched persistentVolume. -func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PersistentVolume, err error) { - result = &core.PersistentVolume{} - err = c.client.Patch(pt). - Resource("persistentvolumes"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go deleted file mode 100644 index a0111d67cdce6..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. -// A group's client should implement this interface. -type PersistentVolumeClaimsGetter interface { - PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface -} - -// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. -type PersistentVolumeClaimInterface interface { - Create(*core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) - Update(*core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) - UpdateStatus(*core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.PersistentVolumeClaim, error) - List(opts v1.ListOptions) (*core.PersistentVolumeClaimList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PersistentVolumeClaim, err error) - PersistentVolumeClaimExpansion -} - -// persistentVolumeClaims implements PersistentVolumeClaimInterface -type persistentVolumeClaims struct { - client rest.Interface - ns string -} - -// newPersistentVolumeClaims returns a PersistentVolumeClaims -func newPersistentVolumeClaims(c *CoreClient, namespace string) *persistentVolumeClaims { - return &persistentVolumeClaims{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(name string, options v1.GetOptions) (result *core.PersistentVolumeClaim, err error) { - result = &core.PersistentVolumeClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(opts v1.ListOptions) (result *core.PersistentVolumeClaimList, err error) { - result = &core.PersistentVolumeClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *persistentVolumeClaims) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(persistentVolumeClaim *core.PersistentVolumeClaim) (result *core.PersistentVolumeClaim, err error) { - result = &core.PersistentVolumeClaim{} - err = c.client.Post(). - Namespace(c.ns). 
- Resource("persistentvolumeclaims"). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Update(persistentVolumeClaim *core.PersistentVolumeClaim) (result *core.PersistentVolumeClaim, err error) { - result = &core.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *core.PersistentVolumeClaim) (result *core.PersistentVolumeClaim, err error) { - result = &core.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - SubResource("status"). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *persistentVolumeClaims) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumeClaims) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PersistentVolumeClaim, err error) { - result = &core.PersistentVolumeClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go deleted file mode 100644 index b73b5b4658707..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// PodsGetter has a method to return a PodInterface. -// A group's client should implement this interface. -type PodsGetter interface { - Pods(namespace string) PodInterface -} - -// PodInterface has methods to work with Pod resources. -type PodInterface interface { - Create(*core.Pod) (*core.Pod, error) - Update(*core.Pod) (*core.Pod, error) - UpdateStatus(*core.Pod) (*core.Pod, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.Pod, error) - List(opts v1.ListOptions) (*core.PodList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Pod, err error) - PodExpansion -} - -// pods implements PodInterface -type pods struct { - client rest.Interface - ns string -} - -// newPods returns a Pods -func newPods(c *CoreClient, namespace string) *pods { - return &pods{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(name string, options v1.GetOptions) (result *core.Pod, err error) { - result = &core.Pod{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(opts v1.ListOptions) (result *core.PodList, err error) { - result = &core.PodList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *pods) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Create(pod *core.Pod) (result *core.Pod, err error) { - result = &core.Pod{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pods"). - Body(pod). - Do(). - Into(result) - return -} - -// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(pod *core.Pod) (result *core.Pod, err error) { - result = &core.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - Body(pod). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *pods) UpdateStatus(pod *core.Pod) (result *core.Pod, err error) { - result = &core.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - SubResource("status"). - Body(pod). - Do(). 
- Into(result) - return -} - -// Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *pods) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pods) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched pod. -func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Pod, err error) { - result = &core.Pod{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pods"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod_expansion.go deleted file mode 100644 index 86fead7cced71..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod_expansion.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api/legacyscheme" - api "k8s.io/kubernetes/pkg/apis/core" -) - -// The PodExpansion interface allows manually adding extra methods to the PodInterface. -type PodExpansion interface { - Bind(binding *api.Binding) error - GetLogs(name string, opts *api.PodLogOptions) *restclient.Request -} - -// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). -func (c *pods) Bind(binding *api.Binding) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() -} - -// Get constructs a request for getting the logs for a pod -func (c *pods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request { - return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, legacyscheme.ParameterCodec) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go deleted file mode 100644 index a9ef5caec882b..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// PodTemplatesGetter has a method to return a PodTemplateInterface. -// A group's client should implement this interface. -type PodTemplatesGetter interface { - PodTemplates(namespace string) PodTemplateInterface -} - -// PodTemplateInterface has methods to work with PodTemplate resources. -type PodTemplateInterface interface { - Create(*core.PodTemplate) (*core.PodTemplate, error) - Update(*core.PodTemplate) (*core.PodTemplate, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.PodTemplate, error) - List(opts v1.ListOptions) (*core.PodTemplateList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PodTemplate, err error) - PodTemplateExpansion -} - -// podTemplates implements PodTemplateInterface -type podTemplates struct { - client rest.Interface - ns string -} - -// newPodTemplates returns a PodTemplates -func newPodTemplates(c *CoreClient, namespace string) *podTemplates { - return &podTemplates{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *podTemplates) Get(name string, options v1.GetOptions) (result *core.PodTemplate, err error) { - result = &core.PodTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *podTemplates) List(opts v1.ListOptions) (result *core.PodTemplateList, err error) { - result = &core.PodTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. 
-func (c *podTemplates) Create(podTemplate *core.PodTemplate) (result *core.PodTemplate, err error) { - result = &core.PodTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podtemplates"). - Body(podTemplate). - Do(). - Into(result) - return -} - -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(podTemplate *core.PodTemplate) (result *core.PodTemplate, err error) { - result = &core.PodTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podtemplates"). - Name(podTemplate.Name). - Body(podTemplate). - Do(). - Into(result) - return -} - -// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *podTemplates) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podTemplates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched podTemplate. -func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PodTemplate, err error) { - result = &core.PodTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podtemplates"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go deleted file mode 100644 index 225f020fee104..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go +++ /dev/null @@ -1,206 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. -// A group's client should implement this interface. 
-type ReplicationControllersGetter interface { - ReplicationControllers(namespace string) ReplicationControllerInterface -} - -// ReplicationControllerInterface has methods to work with ReplicationController resources. -type ReplicationControllerInterface interface { - Create(*core.ReplicationController) (*core.ReplicationController, error) - Update(*core.ReplicationController) (*core.ReplicationController, error) - UpdateStatus(*core.ReplicationController) (*core.ReplicationController, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.ReplicationController, error) - List(opts v1.ListOptions) (*core.ReplicationControllerList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ReplicationController, err error) - GetScale(replicationControllerName string, options v1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(replicationControllerName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) - - ReplicationControllerExpansion -} - -// replicationControllers implements ReplicationControllerInterface -type replicationControllers struct { - client rest.Interface - ns string -} - -// newReplicationControllers returns a ReplicationControllers -func newReplicationControllers(c *CoreClient, namespace string) *replicationControllers { - return &replicationControllers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. -func (c *replicationControllers) Get(name string, options v1.GetOptions) (result *core.ReplicationController, err error) { - result = &core.ReplicationController{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *replicationControllers) List(opts v1.ListOptions) (result *core.ReplicationControllerList, err error) { - result = &core.ReplicationControllerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *replicationControllers) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(replicationController *core.ReplicationController) (result *core.ReplicationController, err error) { - result = &core.ReplicationController{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Body(replicationController). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. 
-func (c *replicationControllers) Update(replicationController *core.ReplicationController) (result *core.ReplicationController, err error) { - result = &core.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - Body(replicationController). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicationControllers) UpdateStatus(replicationController *core.ReplicationController) (result *core.ReplicationController, err error) { - result = &core.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - SubResource("status"). - Body(replicationController). - Do(). - Into(result) - return -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *replicationControllers) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicationControllers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched replicationController. -func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ReplicationController, err error) { - result = &core.ReplicationController{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicationcontrollers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the replicationController, and returns the corresponding autoscaling.Scale object, and an error if there is any. -func (c *replicationControllers) GetScale(replicationControllerName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationControllerName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationControllerName). - SubResource("scale"). - Body(scale). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go deleted file mode 100644 index 990451072d07d..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. -// A group's client should implement this interface. -type ResourceQuotasGetter interface { - ResourceQuotas(namespace string) ResourceQuotaInterface -} - -// ResourceQuotaInterface has methods to work with ResourceQuota resources. -type ResourceQuotaInterface interface { - Create(*core.ResourceQuota) (*core.ResourceQuota, error) - Update(*core.ResourceQuota) (*core.ResourceQuota, error) - UpdateStatus(*core.ResourceQuota) (*core.ResourceQuota, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.ResourceQuota, error) - List(opts v1.ListOptions) (*core.ResourceQuotaList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ResourceQuota, err error) - ResourceQuotaExpansion -} - -// resourceQuotas implements ResourceQuotaInterface -type resourceQuotas struct { - client rest.Interface - ns string -} - -// newResourceQuotas returns a ResourceQuotas -func newResourceQuotas(c *CoreClient, namespace string) *resourceQuotas { - return &resourceQuotas{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *resourceQuotas) Get(name string, options v1.GetOptions) (result *core.ResourceQuota, err error) { - result = &core.ResourceQuota{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *resourceQuotas) List(opts v1.ListOptions) (result *core.ResourceQuotaList, err error) { - result = &core.ResourceQuotaList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *resourceQuotas) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(resourceQuota *core.ResourceQuota) (result *core.ResourceQuota, err error) { - result = &core.ResourceQuota{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourcequotas"). - Body(resourceQuota). - Do(). - Into(result) - return -} - -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Update(resourceQuota *core.ResourceQuota) (result *core.ResourceQuota, err error) { - result = &core.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - Body(resourceQuota). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *resourceQuotas) UpdateStatus(resourceQuota *core.ResourceQuota) (result *core.ResourceQuota, err error) { - result = &core.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - SubResource("status"). - Body(resourceQuota). - Do(). - Into(result) - return -} - -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *resourceQuotas) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceQuotas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched resourceQuota. -func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ResourceQuota, err error) { - result = &core.ResourceQuota{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourcequotas"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go deleted file mode 100644 index c3fe44d5d8748..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// SecretsGetter has a method to return a SecretInterface. -// A group's client should implement this interface. -type SecretsGetter interface { - Secrets(namespace string) SecretInterface -} - -// SecretInterface has methods to work with Secret resources. -type SecretInterface interface { - Create(*core.Secret) (*core.Secret, error) - Update(*core.Secret) (*core.Secret, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.Secret, error) - List(opts v1.ListOptions) (*core.SecretList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Secret, err error) - SecretExpansion -} - -// secrets implements SecretInterface -type secrets struct { - client rest.Interface - ns string -} - -// newSecrets returns a Secrets -func newSecrets(c *CoreClient, namespace string) *secrets { - return &secrets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(name string, options v1.GetOptions) (result *core.Secret, err error) { - result = &core.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(opts v1.ListOptions) (result *core.SecretList, err error) { - result = &core.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(secret *core.Secret) (result *core.Secret, err error) { - result = &core.Secret{} - err = c.client.Post(). - Namespace(c.ns). - Resource("secrets"). - Body(secret). - Do(). - Into(result) - return -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. 
-func (c *secrets) Update(secret *core.Secret) (result *core.Secret, err error) { - result = &core.Secret{} - err = c.client.Put(). - Namespace(c.ns). - Resource("secrets"). - Name(secret.Name). - Body(secret). - Do(). - Into(result) - return -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *secrets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Secret, err error) { - result = &core.Secret{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("secrets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go deleted file mode 100644 index e05f533762903..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ServicesGetter has a method to return a ServiceInterface. -// A group's client should implement this interface. -type ServicesGetter interface { - Services(namespace string) ServiceInterface -} - -// ServiceInterface has methods to work with Service resources. 
-type ServiceInterface interface { - Create(*core.Service) (*core.Service, error) - Update(*core.Service) (*core.Service, error) - UpdateStatus(*core.Service) (*core.Service, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.Service, error) - List(opts v1.ListOptions) (*core.ServiceList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Service, err error) - ServiceExpansion -} - -// services implements ServiceInterface -type services struct { - client rest.Interface - ns string -} - -// newServices returns a Services -func newServices(c *CoreClient, namespace string) *services { - return &services{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(name string, options v1.GetOptions) (result *core.Service, err error) { - result = &core.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(opts v1.ListOptions) (result *core.ServiceList, err error) { - result = &core.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(service *core.Service) (result *core.Service, err error) { - result = &core.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - Body(service). - Do(). - Into(result) - return -} - -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(service *core.Service) (result *core.Service, err error) { - result = &core.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - Body(service). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *services) UpdateStatus(service *core.Service) (result *core.Service, err error) { - result = &core.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - Body(service). - Do(). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(options). - Do(). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *services) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched service. -func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Service, err error) { - result = &core.Service{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("services"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service_expansion.go deleted file mode 100644 index 247d0682bdfcf..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service_expansion.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - "k8s.io/apimachinery/pkg/util/net" - restclient "k8s.io/client-go/rest" -) - -// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface. -type ServiceExpansion interface { - ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper -} - -// ProxyGet returns a response of the service by calling it through the proxy. -func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). - Namespace(c.ns). - Resource("services"). - SubResource("proxy"). - Name(net.JoinSchemeNamePort(scheme, name, port)). - Suffix(path) - for k, v := range params { - request = request.Param(k, v) - } - return request -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go deleted file mode 100644 index c2b6012c4d062..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
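The service_expansion.go file deleted just above layers ProxyGet on top of the generated service client. A hedged sketch of how that expansion method was typically consumed follows; the scheme, port, and path are assumptions made for illustration.

```go
package example

import (
	"fmt"

	coreinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
)

// probeService calls a service endpoint through the API server proxy using
// the ProxyGet expansion method; DoRaw issues the request and returns the body.
func probeService(services coreinternal.ServiceInterface, name string) error {
	body, err := services.ProxyGet("http", name, "8080", "/healthz", nil).DoRaw()
	if err != nil {
		return err
	}
	fmt.Printf("proxy response from %s: %s\n", name, body)
	return nil
}
```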
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - core "k8s.io/kubernetes/pkg/apis/core" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ServiceAccountsGetter has a method to return a ServiceAccountInterface. -// A group's client should implement this interface. -type ServiceAccountsGetter interface { - ServiceAccounts(namespace string) ServiceAccountInterface -} - -// ServiceAccountInterface has methods to work with ServiceAccount resources. -type ServiceAccountInterface interface { - Create(*core.ServiceAccount) (*core.ServiceAccount, error) - Update(*core.ServiceAccount) (*core.ServiceAccount, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*core.ServiceAccount, error) - List(opts v1.ListOptions) (*core.ServiceAccountList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ServiceAccount, err error) - ServiceAccountExpansion -} - -// serviceAccounts implements ServiceAccountInterface -type serviceAccounts struct { - client rest.Interface - ns string -} - -// newServiceAccounts returns a ServiceAccounts -func newServiceAccounts(c *CoreClient, namespace string) *serviceAccounts { - return &serviceAccounts{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(name string, options v1.GetOptions) (result *core.ServiceAccount, err error) { - result = &core.ServiceAccount{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *serviceAccounts) List(opts v1.ListOptions) (result *core.ServiceAccountList, err error) { - result = &core.ServiceAccountList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *serviceAccounts) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(serviceAccount *core.ServiceAccount) (result *core.ServiceAccount, err error) { - result = &core.ServiceAccount{} - err = c.client.Post(). - Namespace(c.ns). - Resource("serviceaccounts"). - Body(serviceAccount). - Do(). - Into(result) - return -} - -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. 
-func (c *serviceAccounts) Update(serviceAccount *core.ServiceAccount) (result *core.ServiceAccount, err error) { - result = &core.ServiceAccount{} - err = c.client.Put(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(serviceAccount.Name). - Body(serviceAccount). - Do(). - Into(result) - return -} - -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *serviceAccounts) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceAccounts) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched serviceAccount. -func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ServiceAccount, err error) { - result = &core.ServiceAccount{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("serviceaccounts"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/BUILD.bazel deleted file mode 100644 index 230bfde7096b3..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "events_client.go", - "generated_expansion.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
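All of the core typed clients deleted in this change share the same Create/Delete shape. Here is a small, hypothetical example against the ServiceAccountInterface shown above; the function name and the best-effort delete are illustrative shortcuts, not a recommended pattern.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
	coreinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
)

// recreateServiceAccount shows the Create/Delete round trip shared by the
// deleted typed clients; error handling is deliberately minimal.
func recreateServiceAccount(sas coreinternal.ServiceAccountInterface, name string) error {
	// Best-effort delete; a real caller would distinguish "not found" errors.
	_ = sas.Delete(name, &metav1.DeleteOptions{})

	_, err := sas.Create(&core.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	})
	return err
}
```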
- -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/events_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/events_client.go deleted file mode 100644 index 9eb8b7c523fae..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/events_client.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type EventsInterface interface { - RESTClient() rest.Interface -} - -// EventsClient is used to interact with features provided by the events.k8s.io group. -type EventsClient struct { - restClient rest.Interface -} - -// NewForConfig creates a new EventsClient for the given config. -func NewForConfig(c *rest.Config) (*EventsClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &EventsClient{client}, nil -} - -// NewForConfigOrDie creates a new EventsClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *EventsClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new EventsClient for the given RESTClient. -func New(c rest.Interface) *EventsClient { - return &EventsClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("events.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("events.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *EventsClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/generated_expansion.go deleted file mode 100644 index 1b59c8431cea3..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD.bazel deleted file mode 100644 index fb5237840345d..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "deployment_expansion.go", - "doc.go", - "extensions_client.go", - "generated_expansion.go", - "ingress.go", - "replicaset.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go deleted file mode 100644 index 66fd1af05ef19..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
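The events_client.go removed above builds its REST client through NewForConfig, defaulting APIPath to /apis and QPS/Burst to 5/10. The brief sketch below constructs that client from in-cluster credentials; using rest.InClusterConfig here is an assumption about the caller's environment rather than anything this diff prescribes.

```go
package example

import (
	rest "k8s.io/client-go/rest"
	eventsinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion"
)

// newEventsClient builds the events.k8s.io client from in-cluster credentials.
// NewForConfig copies the config and applies the defaults from the deleted
// events_client.go before constructing the underlying REST client.
func newEventsClient() (*eventsinternal.EventsClient, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return eventsinternal.NewForConfig(cfg)
}
```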
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// DaemonSetsGetter has a method to return a DaemonSetInterface. -// A group's client should implement this interface. -type DaemonSetsGetter interface { - DaemonSets(namespace string) DaemonSetInterface -} - -// DaemonSetInterface has methods to work with DaemonSet resources. -type DaemonSetInterface interface { - Create(*extensions.DaemonSet) (*extensions.DaemonSet, error) - Update(*extensions.DaemonSet) (*extensions.DaemonSet, error) - UpdateStatus(*extensions.DaemonSet) (*extensions.DaemonSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*extensions.DaemonSet, error) - List(opts v1.ListOptions) (*extensions.DaemonSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.DaemonSet, err error) - DaemonSetExpansion -} - -// daemonSets implements DaemonSetInterface -type daemonSets struct { - client rest.Interface - ns string -} - -// newDaemonSets returns a DaemonSets -func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets { - return &daemonSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string, options v1.GetOptions) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts v1.ListOptions) (result *extensions.DaemonSetList, err error) { - result = &extensions.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). 
- Resource("daemonsets"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - Body(daemonSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go deleted file mode 100644 index 074ea47f72d64..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go +++ /dev/null @@ -1,206 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// DeploymentsGetter has a method to return a DeploymentInterface. -// A group's client should implement this interface. -type DeploymentsGetter interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. -type DeploymentInterface interface { - Create(*extensions.Deployment) (*extensions.Deployment, error) - Update(*extensions.Deployment) (*extensions.Deployment, error) - UpdateStatus(*extensions.Deployment) (*extensions.Deployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*extensions.Deployment, error) - List(opts v1.ListOptions) (*extensions.DeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.Deployment, err error) - GetScale(deploymentName string, options v1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(deploymentName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) - - DeploymentExpansion -} - -// deployments implements DeploymentInterface -type deployments struct { - client rest.Interface - ns string -} - -// newDeployments returns a Deployments -func newDeployments(c *ExtensionsClient, namespace string) *deployments { - return &deployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options v1.GetOptions) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts v1.ListOptions) (result *extensions.DeploymentList, err error) { - result = &extensions.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. 
Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the deployment, and returns the corresponding autoscaling.Scale object, and an error if there is any. -func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(deploymentName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment_expansion.go deleted file mode 100644 index f8d7a1c6341c3..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment_expansion.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
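The deleted deployment.go also wires the scale subresource through GetScale and UpdateScale. A short sketch of the read-modify-write pattern those two calls imply is shown below; the helper name and parameters are illustrative, and a concurrent caller would normally retry on conflict.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extensionsinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion"
)

// scaleDeployment reads the current scale subresource and writes back a new
// replica count via GetScale/UpdateScale.
func scaleDeployment(deployments extensionsinternal.DeploymentInterface, name string, replicas int32) error {
	scale, err := deployments.GetScale(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = deployments.UpdateScale(name, scale)
	return err
}
```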
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import "k8s.io/kubernetes/pkg/apis/extensions" - -// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. -type DeploymentExpansion interface { - Rollback(*extensions.DeploymentRollback) error -} - -// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. -func (c *deployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go deleted file mode 100644 index 43ea79be3233b..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type ExtensionsInterface interface { - RESTClient() rest.Interface - DaemonSetsGetter - DeploymentsGetter - IngressesGetter - ReplicaSetsGetter -} - -// ExtensionsClient is used to interact with features provided by the extensions group. -type ExtensionsClient struct { - restClient rest.Interface -} - -func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface { - return newDaemonSets(c, namespace) -} - -func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - -func (c *ExtensionsClient) Ingresses(namespace string) IngressInterface { - return newIngresses(c, namespace) -} - -func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { - return newReplicaSets(c, namespace) -} - -// NewForConfig creates a new ExtensionsClient for the given config. -func NewForConfig(c *rest.Config) (*ExtensionsClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ExtensionsClient{client}, nil -} - -// NewForConfigOrDie creates a new ExtensionsClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ExtensionsClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ExtensionsClient for the given RESTClient. -func New(c rest.Interface) *ExtensionsClient { - return &ExtensionsClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("extensions")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("extensions")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *ExtensionsClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go deleted file mode 100644 index bbe7577ccb73b..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
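The extensions_client.go removed above is the entry point that hands out the per-resource interfaces. The sketch below wires it from a rest.Config and lists DaemonSets in one namespace, using only constructors and methods present in the deleted code; the helper name is invented for illustration.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rest "k8s.io/client-go/rest"
	extensionsinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion"
)

// listDaemonSetNames builds the ExtensionsClient from a rest.Config and
// returns the names of the DaemonSets in the given namespace.
func listDaemonSetNames(cfg *rest.Config, namespace string) ([]string, error) {
	client, err := extensionsinternal.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	list, err := client.DaemonSets(namespace).List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for _, ds := range list.Items {
		names = append(names, ds.Name)
	}
	return names, nil
}
```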
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// IngressesGetter has a method to return a IngressInterface. -// A group's client should implement this interface. -type IngressesGetter interface { - Ingresses(namespace string) IngressInterface -} - -// IngressInterface has methods to work with Ingress resources. -type IngressInterface interface { - Create(*extensions.Ingress) (*extensions.Ingress, error) - Update(*extensions.Ingress) (*extensions.Ingress, error) - UpdateStatus(*extensions.Ingress) (*extensions.Ingress, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*extensions.Ingress, error) - List(opts v1.ListOptions) (*extensions.IngressList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.Ingress, err error) - IngressExpansion -} - -// ingresses implements IngressInterface -type ingresses struct { - client rest.Interface - ns string -} - -// newIngresses returns a Ingresses -func newIngresses(c *ExtensionsClient, namespace string) *ingresses { - return &ingresses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string, options v1.GetOptions) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(opts v1.ListOptions) (result *extensions.IngressList, err error) { - result = &extensions.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - Body(ingress). - Do(). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - Body(ingress). - Do(). 
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *ingresses) UpdateStatus(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - Body(ingress). - Do(). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go deleted file mode 100644 index b2263cb09ed5f..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go +++ /dev/null @@ -1,206 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ReplicaSetsGetter has a method to return a ReplicaSetInterface. -// A group's client should implement this interface. -type ReplicaSetsGetter interface { - ReplicaSets(namespace string) ReplicaSetInterface -} - -// ReplicaSetInterface has methods to work with ReplicaSet resources. 
-type ReplicaSetInterface interface { - Create(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Update(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - UpdateStatus(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*extensions.ReplicaSet, error) - List(opts v1.ListOptions) (*extensions.ReplicaSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.ReplicaSet, err error) - GetScale(replicaSetName string, options v1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(replicaSetName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) - - ReplicaSetExpansion -} - -// replicaSets implements ReplicaSetInterface -type replicaSets struct { - client rest.Interface - ns string -} - -// newReplicaSets returns a ReplicaSets -func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets { - return &replicaSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(name string, options v1.GetOptions) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts v1.ListOptions) (result *extensions.ReplicaSetList, err error) { - result = &extensions.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - Body(replicaSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *replicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the replicaSet, and returns the corresponding autoscaling.Scale object, and an error if there is any. -func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSetName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSetName). - SubResource("scale"). - Body(scale). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD.bazel deleted file mode 100644 index 5cccbc290d9fa..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "networking_client.go", - "networkpolicy.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/networking:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/generated_expansion.go deleted file mode 100644 index 8406486f4d022..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -type NetworkPolicyExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networking_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networking_client.go deleted file mode 100644 index 1d2f9ab0eaf69..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networking_client.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type NetworkingInterface interface { - RESTClient() rest.Interface - NetworkPoliciesGetter -} - -// NetworkingClient is used to interact with features provided by the networking.k8s.io group. -type NetworkingClient struct { - restClient rest.Interface -} - -func (c *NetworkingClient) NetworkPolicies(namespace string) NetworkPolicyInterface { - return newNetworkPolicies(c, namespace) -} - -// NewForConfig creates a new NetworkingClient for the given config. -func NewForConfig(c *rest.Config) (*NetworkingClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &NetworkingClient{client}, nil -} - -// NewForConfigOrDie creates a new NetworkingClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *NetworkingClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new NetworkingClient for the given RESTClient. 
-func New(c rest.Interface) *NetworkingClient { - return &NetworkingClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("networking.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("networking.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *NetworkingClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go deleted file mode 100644 index 6a0cdbc3bf609..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - networking "k8s.io/kubernetes/pkg/apis/networking" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. -// A group's client should implement this interface. -type NetworkPoliciesGetter interface { - NetworkPolicies(namespace string) NetworkPolicyInterface -} - -// NetworkPolicyInterface has methods to work with NetworkPolicy resources. 
-type NetworkPolicyInterface interface { - Create(*networking.NetworkPolicy) (*networking.NetworkPolicy, error) - Update(*networking.NetworkPolicy) (*networking.NetworkPolicy, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*networking.NetworkPolicy, error) - List(opts v1.ListOptions) (*networking.NetworkPolicyList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *networking.NetworkPolicy, err error) - NetworkPolicyExpansion -} - -// networkPolicies implements NetworkPolicyInterface -type networkPolicies struct { - client rest.Interface - ns string -} - -// newNetworkPolicies returns a NetworkPolicies -func newNetworkPolicies(c *NetworkingClient, namespace string) *networkPolicies { - return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(name string, options v1.GetOptions) (result *networking.NetworkPolicy, err error) { - result = &networking.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(opts v1.ListOptions) (result *networking.NetworkPolicyList, err error) { - result = &networking.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(networkPolicy *networking.NetworkPolicy) (result *networking.NetworkPolicy, err error) { - result = &networking.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - Body(networkPolicy). - Do(). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(networkPolicy *networking.NetworkPolicy) (result *networking.NetworkPolicy, err error) { - result = &networking.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - Body(networkPolicy). - Do(). - Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *networkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *networking.NetworkPolicy, err error) { - result = &networking.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD.bazel deleted file mode 100644 index aab32382ffbba..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "eviction.go", - "eviction_expansion.go", - "generated_expansion.go", - "poddisruptionbudget.go", - "podsecuritypolicy.go", - "policy_client.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/policy:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction.go deleted file mode 100644 index 37297cbb877f0..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" -) - -// EvictionsGetter has a method to return a EvictionInterface. -// A group's client should implement this interface. -type EvictionsGetter interface { - Evictions(namespace string) EvictionInterface -} - -// EvictionInterface has methods to work with Eviction resources. -type EvictionInterface interface { - EvictionExpansion -} - -// evictions implements EvictionInterface -type evictions struct { - client rest.Interface - ns string -} - -// newEvictions returns a Evictions -func newEvictions(c *PolicyClient, namespace string) *evictions { - return &evictions{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/generated_expansion.go deleted file mode 100644 index a3ff5ba028ab8..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/generated_expansion.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -type PodDisruptionBudgetExpansion interface{} - -type PodSecurityPolicyExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go deleted file mode 100644 index 7e7abfcb27cce..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - policy "k8s.io/kubernetes/pkg/apis/policy" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. -// A group's client should implement this interface. -type PodDisruptionBudgetsGetter interface { - PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface -} - -// PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. -type PodDisruptionBudgetInterface interface { - Create(*policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) - Update(*policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) - UpdateStatus(*policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*policy.PodDisruptionBudget, error) - List(opts v1.ListOptions) (*policy.PodDisruptionBudgetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *policy.PodDisruptionBudget, err error) - PodDisruptionBudgetExpansion -} - -// podDisruptionBudgets implements PodDisruptionBudgetInterface -type podDisruptionBudgets struct { - client rest.Interface - ns string -} - -// newPodDisruptionBudgets returns a PodDisruptionBudgets -func newPodDisruptionBudgets(c *PolicyClient, namespace string) *podDisruptionBudgets { - return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *policy.PodDisruptionBudgetList, err error) { - result = &policy.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Body(podDisruptionBudget). - Do(). - Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Update(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - Body(podDisruptionBudget). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - Body(podDisruptionBudget). - Do(). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/podsecuritypolicy.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/podsecuritypolicy.go deleted file mode 100644 index 17c162fa47a44..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/podsecuritypolicy.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - policy "k8s.io/kubernetes/pkg/apis/policy" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. -type PodSecurityPolicyInterface interface { - Create(*policy.PodSecurityPolicy) (*policy.PodSecurityPolicy, error) - Update(*policy.PodSecurityPolicy) (*policy.PodSecurityPolicy, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*policy.PodSecurityPolicy, error) - List(opts v1.ListOptions) (*policy.PodSecurityPolicyList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *policy.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *PolicyClient) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *policy.PodSecurityPolicy, err error) { - result = &policy.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *policy.PodSecurityPolicyList, err error) { - result = &policy.PodSecurityPolicyList{} - err = c.client.Get(). 
- Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(podSecurityPolicy *policy.PodSecurityPolicy) (result *policy.PodSecurityPolicy, err error) { - result = &policy.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - Body(podSecurityPolicy). - Do(). - Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(podSecurityPolicy *policy.PodSecurityPolicy) (result *policy.PodSecurityPolicy, err error) { - result = &policy.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - Body(podSecurityPolicy). - Do(). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *policy.PodSecurityPolicy, err error) { - result = &policy.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/policy_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/policy_client.go deleted file mode 100644 index 8040244dd5bd2..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/policy_client.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type PolicyInterface interface { - RESTClient() rest.Interface - EvictionsGetter - PodDisruptionBudgetsGetter - PodSecurityPoliciesGetter -} - -// PolicyClient is used to interact with features provided by the policy group. -type PolicyClient struct { - restClient rest.Interface -} - -func (c *PolicyClient) Evictions(namespace string) EvictionInterface { - return newEvictions(c, namespace) -} - -func (c *PolicyClient) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { - return newPodDisruptionBudgets(c, namespace) -} - -func (c *PolicyClient) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - -// NewForConfig creates a new PolicyClient for the given config. -func NewForConfig(c *rest.Config) (*PolicyClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &PolicyClient{client}, nil -} - -// NewForConfigOrDie creates a new PolicyClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *PolicyClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new PolicyClient for the given RESTClient. -func New(c rest.Interface) *PolicyClient { - return &PolicyClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("policy")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("policy")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *PolicyClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD.bazel deleted file mode 100644 index e13b815f2959d..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "doc.go", - "generated_expansion.go", - "rbac_client.go", - "role.go", - "rolebinding.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/rbac:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go deleted file mode 100644 index 985e90f16aa84..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ClusterRolesGetter has a method to return a ClusterRoleInterface. -// A group's client should implement this interface. -type ClusterRolesGetter interface { - ClusterRoles() ClusterRoleInterface -} - -// ClusterRoleInterface has methods to work with ClusterRole resources. 
-type ClusterRoleInterface interface { - Create(*rbac.ClusterRole) (*rbac.ClusterRole, error) - Update(*rbac.ClusterRole) (*rbac.ClusterRole, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*rbac.ClusterRole, error) - List(opts v1.ListOptions) (*rbac.ClusterRoleList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac.ClusterRole, err error) - ClusterRoleExpansion -} - -// clusterRoles implements ClusterRoleInterface -type clusterRoles struct { - client rest.Interface -} - -// newClusterRoles returns a ClusterRoles -func newClusterRoles(c *RbacClient) *clusterRoles { - return &clusterRoles{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(opts v1.ListOptions) (result *rbac.ClusterRoleList, err error) { - result = &rbac.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - Body(clusterRole). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - Body(clusterRole). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. 
-func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go deleted file mode 100644 index 01dcf227524d2..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. -// A group's client should implement this interface. -type ClusterRoleBindingsGetter interface { - ClusterRoleBindings() ClusterRoleBindingInterface -} - -// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. -type ClusterRoleBindingInterface interface { - Create(*rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) - Update(*rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*rbac.ClusterRoleBinding, error) - List(opts v1.ListOptions) (*rbac.ClusterRoleBindingList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac.ClusterRoleBinding, err error) - ClusterRoleBindingExpansion -} - -// clusterRoleBindings implements ClusterRoleBindingInterface -type clusterRoleBindings struct { - client rest.Interface -} - -// newClusterRoleBindings returns a ClusterRoleBindings -func newClusterRoleBindings(c *RbacClient) *clusterRoleBindings { - return &clusterRoleBindings{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { - result = &rbac.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - Body(clusterRoleBinding). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - Body(clusterRoleBinding). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/generated_expansion.go deleted file mode 100644 index bd6d9237d9211..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/generated_expansion.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -type ClusterRoleExpansion interface{} - -type ClusterRoleBindingExpansion interface{} - -type RoleExpansion interface{} - -type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rbac_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rbac_client.go deleted file mode 100644 index 269fe8791fea0..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rbac_client.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type RbacInterface interface { - RESTClient() rest.Interface - ClusterRolesGetter - ClusterRoleBindingsGetter - RolesGetter - RoleBindingsGetter -} - -// RbacClient is used to interact with features provided by the rbac.authorization.k8s.io group. 
-type RbacClient struct { - restClient rest.Interface -} - -func (c *RbacClient) ClusterRoles() ClusterRoleInterface { - return newClusterRoles(c) -} - -func (c *RbacClient) ClusterRoleBindings() ClusterRoleBindingInterface { - return newClusterRoleBindings(c) -} - -func (c *RbacClient) Roles(namespace string) RoleInterface { - return newRoles(c, namespace) -} - -func (c *RbacClient) RoleBindings(namespace string) RoleBindingInterface { - return newRoleBindings(c, namespace) -} - -// NewForConfig creates a new RbacClient for the given config. -func NewForConfig(c *rest.Config) (*RbacClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &RbacClient{client}, nil -} - -// NewForConfigOrDie creates a new RbacClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *RbacClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new RbacClient for the given RESTClient. -func New(c rest.Interface) *RbacClient { - return &RbacClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("rbac.authorization.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("rbac.authorization.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *RbacClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go deleted file mode 100644 index 7272550e8dbcb..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// RolesGetter has a method to return a RoleInterface. -// A group's client should implement this interface. 
-type RolesGetter interface { - Roles(namespace string) RoleInterface -} - -// RoleInterface has methods to work with Role resources. -type RoleInterface interface { - Create(*rbac.Role) (*rbac.Role, error) - Update(*rbac.Role) (*rbac.Role, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*rbac.Role, error) - List(opts v1.ListOptions) (*rbac.RoleList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac.Role, err error) - RoleExpansion -} - -// roles implements RoleInterface -type roles struct { - client rest.Interface - ns string -} - -// newRoles returns a Roles -func newRoles(c *RbacClient, namespace string) *roles { - return &roles{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(name string, options v1.GetOptions) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(opts v1.ListOptions) (result *rbac.RoleList, err error) { - result = &rbac.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(role *rbac.Role) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - Body(role). - Do(). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(role *rbac.Role) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - Body(role). - Do(). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). 
- Resource("roles"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go deleted file mode 100644 index bed5dcdf9a919..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// RoleBindingsGetter has a method to return a RoleBindingInterface. -// A group's client should implement this interface. -type RoleBindingsGetter interface { - RoleBindings(namespace string) RoleBindingInterface -} - -// RoleBindingInterface has methods to work with RoleBinding resources. -type RoleBindingInterface interface { - Create(*rbac.RoleBinding) (*rbac.RoleBinding, error) - Update(*rbac.RoleBinding) (*rbac.RoleBinding, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*rbac.RoleBinding, error) - List(opts v1.ListOptions) (*rbac.RoleBindingList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac.RoleBinding, err error) - RoleBindingExpansion -} - -// roleBindings implements RoleBindingInterface -type roleBindings struct { - client rest.Interface - ns string -} - -// newRoleBindings returns a RoleBindings -func newRoleBindings(c *RbacClient, namespace string) *roleBindings { - return &roleBindings{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(name string, options v1.GetOptions) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(opts v1.ListOptions) (result *rbac.RoleBindingList, err error) { - result = &rbac.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - Body(roleBinding). - Do(). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - Body(roleBinding). - Do(). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/generated_expansion.go deleted file mode 100644 index df00c75e80c4b..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -type PriorityClassExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go deleted file mode 100644 index 8d5f992da6414..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheduling "k8s.io/kubernetes/pkg/apis/scheduling" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// PriorityClassesGetter has a method to return a PriorityClassInterface. -// A group's client should implement this interface. -type PriorityClassesGetter interface { - PriorityClasses() PriorityClassInterface -} - -// PriorityClassInterface has methods to work with PriorityClass resources. 
-type PriorityClassInterface interface { - Create(*scheduling.PriorityClass) (*scheduling.PriorityClass, error) - Update(*scheduling.PriorityClass) (*scheduling.PriorityClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*scheduling.PriorityClass, error) - List(opts v1.ListOptions) (*scheduling.PriorityClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *scheduling.PriorityClass, err error) - PriorityClassExpansion -} - -// priorityClasses implements PriorityClassInterface -type priorityClasses struct { - client rest.Interface -} - -// newPriorityClasses returns a PriorityClasses -func newPriorityClasses(c *SchedulingClient) *priorityClasses { - return &priorityClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *scheduling.PriorityClass, err error) { - result = &scheduling.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(opts v1.ListOptions) (result *scheduling.PriorityClassList, err error) { - result = &scheduling.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(priorityClass *scheduling.PriorityClass) (result *scheduling.PriorityClass, err error) { - result = &scheduling.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - Body(priorityClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(priorityClass *scheduling.PriorityClass) (result *scheduling.PriorityClass, err error) { - result = &scheduling.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - Body(priorityClass). - Do(). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). 
- VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *scheduling.PriorityClass, err error) { - result = &scheduling.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/scheduling_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/scheduling_client.go deleted file mode 100644 index c30bb6f4d6770..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/scheduling_client.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type SchedulingInterface interface { - RESTClient() rest.Interface - PriorityClassesGetter -} - -// SchedulingClient is used to interact with features provided by the scheduling.k8s.io group. -type SchedulingClient struct { - restClient rest.Interface -} - -func (c *SchedulingClient) PriorityClasses() PriorityClassInterface { - return newPriorityClasses(c) -} - -// NewForConfig creates a new SchedulingClient for the given config. -func NewForConfig(c *rest.Config) (*SchedulingClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &SchedulingClient{client}, nil -} - -// NewForConfigOrDie creates a new SchedulingClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *SchedulingClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new SchedulingClient for the given RESTClient. 
-func New(c rest.Interface) *SchedulingClient { - return &SchedulingClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("scheduling.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("scheduling.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *SchedulingClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD.bazel deleted file mode 100644 index ca8fd07a24f77..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "podpreset.go", - "settings_client.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/settings:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/generated_expansion.go deleted file mode 100644 index e673a5ca8b656..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -type PodPresetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go deleted file mode 100644 index 334c2180c0295..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - settings "k8s.io/kubernetes/pkg/apis/settings" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// PodPresetsGetter has a method to return a PodPresetInterface. -// A group's client should implement this interface. -type PodPresetsGetter interface { - PodPresets(namespace string) PodPresetInterface -} - -// PodPresetInterface has methods to work with PodPreset resources. 
-type PodPresetInterface interface { - Create(*settings.PodPreset) (*settings.PodPreset, error) - Update(*settings.PodPreset) (*settings.PodPreset, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*settings.PodPreset, error) - List(opts v1.ListOptions) (*settings.PodPresetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *settings.PodPreset, err error) - PodPresetExpansion -} - -// podPresets implements PodPresetInterface -type podPresets struct { - client rest.Interface - ns string -} - -// newPodPresets returns a PodPresets -func newPodPresets(c *SettingsClient, namespace string) *podPresets { - return &podPresets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. -func (c *podPresets) Get(name string, options v1.GetOptions) (result *settings.PodPreset, err error) { - result = &settings.PodPreset{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodPresets that match those selectors. -func (c *podPresets) List(opts v1.ListOptions) (result *settings.PodPresetList, err error) { - result = &settings.PodPresetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podPresets. -func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Create(podPreset *settings.PodPreset) (result *settings.PodPreset, err error) { - result = &settings.PodPreset{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podpresets"). - Body(podPreset). - Do(). - Into(result) - return -} - -// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Update(podPreset *settings.PodPreset) (result *settings.PodPreset, err error) { - result = &settings.PodPreset{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podpresets"). - Name(podPreset.Name). - Body(podPreset). - Do(). - Into(result) - return -} - -// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. -func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). 
- Error() -} - -// Patch applies the patch and returns the patched podPreset. -func (c *podPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *settings.PodPreset, err error) { - result = &settings.PodPreset{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podpresets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD.bazel deleted file mode 100644 index 3b3e6935d5a3a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "storage_client.go", - "storageclass.go", - "volumeattachment.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/storage:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go deleted file mode 100644 index 86602442babdc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go deleted file mode 100644 index d74dc56fe97dc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -type StorageClassExpansion interface{} - -type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go deleted file mode 100644 index 9272e2b9c5ead..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -type StorageInterface interface { - RESTClient() rest.Interface - StorageClassesGetter - VolumeAttachmentsGetter -} - -// StorageClient is used to interact with features provided by the storage.k8s.io group. -type StorageClient struct { - restClient rest.Interface -} - -func (c *StorageClient) StorageClasses() StorageClassInterface { - return newStorageClasses(c) -} - -func (c *StorageClient) VolumeAttachments() VolumeAttachmentInterface { - return newVolumeAttachments(c) -} - -// NewForConfig creates a new StorageClient for the given config. -func NewForConfig(c *rest.Config) (*StorageClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &StorageClient{client}, nil -} - -// NewForConfigOrDie creates a new StorageClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *StorageClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new StorageClient for the given RESTClient. 
-func New(c rest.Interface) *StorageClient { - return &StorageClient{c} -} - -func setConfigDefaults(config *rest.Config) error { - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("storage.k8s.io")[0].Group { - gv := scheme.Scheme.PrioritizedVersionsForGroup("storage.k8s.io")[0] - config.GroupVersion = &gv - } - config.NegotiatedSerializer = scheme.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *StorageClient) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go deleted file mode 100644 index 4939d7af8c3c3..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - storage "k8s.io/kubernetes/pkg/apis/storage" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// StorageClassesGetter has a method to return a StorageClassInterface. -// A group's client should implement this interface. -type StorageClassesGetter interface { - StorageClasses() StorageClassInterface -} - -// StorageClassInterface has methods to work with StorageClass resources. 
-type StorageClassInterface interface { - Create(*storage.StorageClass) (*storage.StorageClass, error) - Update(*storage.StorageClass) (*storage.StorageClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*storage.StorageClass, error) - List(opts v1.ListOptions) (*storage.StorageClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storage.StorageClass, err error) - StorageClassExpansion -} - -// storageClasses implements StorageClassInterface -type storageClasses struct { - client rest.Interface -} - -// newStorageClasses returns a StorageClasses -func newStorageClasses(c *StorageClient) *storageClasses { - return &storageClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(name string, options v1.GetOptions) (result *storage.StorageClass, err error) { - result = &storage.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(opts v1.ListOptions) (result *storage.StorageClassList, err error) { - result = &storage.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(storageClass *storage.StorageClass) (result *storage.StorageClass, err error) { - result = &storage.StorageClass{} - err = c.client.Post(). - Resource("storageclasses"). - Body(storageClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(storageClass *storage.StorageClass) (result *storage.StorageClass, err error) { - result = &storage.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - Body(storageClass). - Do(). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). 
- Error() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storage.StorageClass, err error) { - result = &storage.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md b/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md deleted file mode 100644 index c647238b777e2..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md +++ /dev/null @@ -1,18 +0,0 @@ -##### Deprecation Notice: cloud providers in this directory are deprecated and will be removed in favor of external (a.k.a out-of-tree) providers. Existing providers in this directory (a.k.a in-tree providers) should only make small incremental changes as needed and avoid large refactors or new features. New providers seeking to support Kubernetes should follow the out-of-tree model as specified in the [Running Kubernetes Cloud Controller Manager docs](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/). For more information on the future of Kubernetes cloud providers see [KEP-0002](https://github.com/kubernetes/community/blob/master/keps/sig-cloud-provider/0002-cloud-controller-manager.md) and [KEP-0013](https://github.com/kubernetes/community/blob/master/keps/sig-cloud-provider/0013-build-deploy-ccm.md). - -Cloud Providers in this directory will continue to be actively developed or maintained and supported at their current level of support as a longer-term solution evolves. - -## Overview: -The mechanism for supporting cloud providers is currently in transition: the original method of implementing cloud provider-specific functionality within the main kubernetes tree (here) is no longer advised; however, the proposed solution is still in development. - -#### Guidance for potential cloud providers: -* Support for cloud providers is currently in a state of flux. Background information on motivation and the proposal for improving is in the github [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider/cloud-provider-refactoring.md). -* In support of this plan, a new cloud-controller-manager binary was added in 1.6. This was the first of several steps (see the proposal for more information). -* Attempts to contribute new cloud providers or (to a lesser extent) persistent volumes to the core repo will likely meet with some pushback from reviewers/approvers. -* It is understood that this is an unfortunate situation in which 'the old way is no longer supported but the new way is not ready yet', but the initial path is unsustainable, and contributors are encouraged to participate in the implementation of the proposed long-term solution, as there is risk that PRs for new cloud providers here will not be approved. -* Though the fully productized support envisioned in the proposal is still 2 - 3 releases out, the foundational work is underway, and a motivated cloud provider could accomplish the work in a forward-looking way. Contributors are encouraged to assist with the implementation of the design outlined in the proposal. - -#### Some additional context on status / direction: -* 1.6 added a new cloud-controller-manager binary that may be used for testing the new out-of-core cloudprovider flow. 
-* Setting cloud-provider=external allows for creation of a separate controller-manager binary -* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_extension.md), further enabling topology customization. diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD.bazel index d128c37d10652..00c7e8f386910 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD.bazel @@ -37,7 +37,6 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/service/elbv2:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/kms:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/sts:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -50,8 +49,9 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/cloud-provider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/api/v1/service:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/cloudprovider:go_default_library", "//vendor/k8s.io/kubernetes/pkg/controller:go_default_library", "//vendor/k8s.io/kubernetes/pkg/credentialprovider/aws:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubelet/apis:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go index 5ef7de1b42558..d4f38dc75f4e7 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go @@ -22,13 +22,12 @@ import ( "fmt" "io" "net" + "path" "strconv" "strings" "sync" "time" - gcfg "gopkg.in/gcfg.v1" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" @@ -43,11 +42,8 @@ import ( "github.com/aws/aws-sdk-go/service/elbv2" "github.com/aws/aws-sdk-go/service/kms" "github.com/aws/aws-sdk-go/service/sts" - "github.com/golang/glog" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/record" - - "path" + gcfg "gopkg.in/gcfg.v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -55,10 +51,12 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/api/v1/service" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume" @@ -141,9 +139,14 @@ const ServiceAnnotationLoadBalancerConnectionIdleTimeout = "service.beta.kuberne const ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled = 
"service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled" // ServiceAnnotationLoadBalancerExtraSecurityGroups is the annotation used -// one the service to specify additional security groups to be added to ELB created +// on the service to specify additional security groups to be added to ELB created const ServiceAnnotationLoadBalancerExtraSecurityGroups = "service.beta.kubernetes.io/aws-load-balancer-extra-security-groups" +// ServiceAnnotationLoadBalancerSecurityGroups is the annotation used +// on the service to specify the security groups to be added to ELB created. Differently from the annotation +// "service.beta.kubernetes.io/aws-load-balancer-extra-security-groups", this replaces all other security groups previously assigned to the ELB. +const ServiceAnnotationLoadBalancerSecurityGroups = "service.beta.kubernetes.io/aws-load-balancer-security-groups" + // ServiceAnnotationLoadBalancerCertificate is the annotation used on the // service to request a secure listener. Value is a valid certificate ARN. // For more, see http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html @@ -255,7 +258,7 @@ const MaxReadThenCreateRetries = 30 // need hardcoded defaults. const DefaultVolumeType = "gp2" -// Used to call RecognizeWellKnownRegions just once +// Used to call recognizeWellKnownRegions just once var once sync.Once // AWS implements PVLabeler. @@ -423,7 +426,7 @@ type VolumeOptions struct { Encrypted bool // fully qualified resource name to the key to use for encryption. // example: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef - KmsKeyId string + KmsKeyID string } // Volumes is an interface for managing cloud-provisioned volumes @@ -506,7 +509,7 @@ type Cloud struct { // attached, to avoid a race condition where we assign a device mapping // and then get a second request before we attach the volume attachingMutex sync.Mutex - attaching map[types.NodeName]map[mountDevice]awsVolumeID + attaching map[types.NodeName]map[mountDevice]EBSVolumeID // state of our device allocator for each node deviceAllocators map[types.NodeName]DeviceAllocator @@ -964,7 +967,7 @@ func init() { Client: ec2metadata.New(sess), } } else { - glog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN) + klog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN) provider = &stscreds.AssumeRoleProvider{ Client: sts.New(sess), RoleARN: cfg.Global.RoleARN, @@ -1001,7 +1004,7 @@ func readAWSCloudConfig(config io.Reader) (*CloudConfig, error) { func updateConfigZone(cfg *CloudConfig, metadata EC2Metadata) error { if cfg.Global.Zone == "" { if metadata != nil { - glog.Info("Zone not specified in configuration file; querying AWS metadata service") + klog.Info("Zone not specified in configuration file; querying AWS metadata service") var err error cfg.Global.Zone, err = getAvailabilityZone(metadata) if err != nil { @@ -1016,10 +1019,6 @@ func updateConfigZone(cfg *CloudConfig, metadata EC2Metadata) error { return nil } -func getInstanceType(metadata EC2Metadata) (string, error) { - return metadata.GetMetadata("instance-type") -} - func getAvailabilityZone(metadata EC2Metadata) (string, error) { return metadata.GetMetadata("placement/availability-zone") } @@ -1039,7 +1038,7 @@ func azToRegion(az string) (string, error) { func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { // We have some state in the Cloud object - in particular the attaching map // Log so that if we are building multiple Cloud objects, it is 
obvious! - glog.Infof("Building AWS cloudprovider") + klog.Infof("Building AWS cloudprovider") metadata, err := awsServices.Metadata() if err != nil { @@ -1062,7 +1061,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { // Trust that if we get a region from configuration or AWS metadata that it is valid, // and register ECR providers - RecognizeRegion(regionName) + recognizeRegion(regionName) if !cfg.Global.DisableStrictZoneCheck { valid := isRegionValid(regionName) @@ -1071,7 +1070,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { return nil, fmt.Errorf("not a valid AWS zone (unknown region): %s", zone) } } else { - glog.Warningf("Strict AWS zone checking is disabled. Proceeding with zone: %s", zone) + klog.Warningf("Strict AWS zone checking is disabled. Proceeding with zone: %s", zone) } ec2, err := awsServices.Compute(regionName) @@ -1109,7 +1108,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { cfg: &cfg, region: regionName, - attaching: make(map[types.NodeName]map[mountDevice]awsVolumeID), + attaching: make(map[types.NodeName]map[mountDevice]EBSVolumeID), deviceAllocators: make(map[types.NodeName]DeviceAllocator), } awsCloud.instanceCache.cloud = awsCloud @@ -1118,7 +1117,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { if cfg.Global.VPC != "" && (cfg.Global.SubnetID != "" || cfg.Global.RoleARN != "") && tagged { // When the master is running on a different AWS account, cloud provider or on-premise // build up a dummy instance and use the VPC from the nodes account - glog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premises") + klog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premises") awsCloud.selfAWSInstance = &awsInstance{ nodeName: "master-dummy", vpcID: cfg.Global.VPC, @@ -1151,18 +1150,18 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { // Register regions, in particular for ECR credentials once.Do(func() { - RecognizeWellKnownRegions() + recognizeWellKnownRegions() }) return awsCloud, nil } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (c *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) { +func (c *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { c.clientBuilder = clientBuilder c.kubeClient = clientBuilder.ClientOrDie("aws-cloud-provider") c.eventBroadcaster = record.NewBroadcaster() - c.eventBroadcaster.StartLogging(glog.Infof) + c.eventBroadcaster.StartLogging(klog.Infof) c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.kubeClient.CoreV1().Events("")}) c.eventRecorder = c.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "aws-cloud-provider"}) } @@ -1233,7 +1232,7 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No if err != nil { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. 
- glog.V(4).Info("Could not determine public IP from AWS metadata.") + klog.V(4).Info("Could not determine public IP from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: externalIP}) } @@ -1242,7 +1241,7 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No if err != nil || len(internalDNS) == 0 { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. - glog.V(4).Info("Could not determine private DNS from AWS metadata.") + klog.V(4).Info("Could not determine private DNS from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: internalDNS}) addresses = append(addresses, v1.NodeAddress{Type: v1.NodeHostName, Address: internalDNS}) @@ -1252,7 +1251,7 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No if err != nil || len(externalDNS) == 0 { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. - glog.V(4).Info("Could not determine public DNS from AWS metadata.") + klog.V(4).Info("Could not determine public DNS from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalDNS, Address: externalDNS}) } @@ -1361,7 +1360,7 @@ func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID strin state := instances[0].State.Name if *state == ec2.InstanceStateNameTerminated { - glog.Warningf("the instance %s is terminated", instanceID) + klog.Warningf("the instance %s is terminated", instanceID) return false, nil } @@ -1384,7 +1383,7 @@ func (c *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID str return false, err } if len(instances) == 0 { - glog.Warningf("the instance %s does not exist anymore", providerID) + klog.Warningf("the instance %s does not exist anymore", providerID) // returns false, because otherwise node is not deleted from cluster // false means that it will continue to check InstanceExistsByProviderID return false, nil @@ -1486,7 +1485,7 @@ func (c *Cloud) GetCandidateZonesForDynamicVolume() (sets.String, error) { } if master { - glog.V(4).Infof("Ignoring master instance %q in zone discovery", aws.StringValue(instance.InstanceId)) + klog.V(4).Infof("Ignoring master instance %q in zone discovery", aws.StringValue(instance.InstanceId)) continue } @@ -1496,7 +1495,7 @@ func (c *Cloud) GetCandidateZonesForDynamicVolume() (sets.String, error) { } } - glog.V(2).Infof("Found instances in zones %s", zones) + klog.V(2).Infof("Found instances in zones %s", zones) return zones, nil } @@ -1546,11 +1545,6 @@ func (c *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) } -// Abstraction around AWS Instance Types -// There isn't an API to get information for a particular instance type (that I know of) -type awsInstanceType struct { -} - // Used to represent a mount device for attaching an EBS volume // This should be stored as a single letter (i.e. 
c, not sdc or /dev/sdc) type mountDevice string @@ -1596,13 +1590,6 @@ func newAWSInstance(ec2Service EC2, instance *ec2.Instance) *awsInstance { return self } -// Gets the awsInstanceType that models the instance type of this instance -func (i *awsInstance) getInstanceType() *awsInstanceType { - // TODO: Make this real - awsInstanceType := &awsInstanceType{} - return awsInstanceType -} - // Gets the full information about this instance from the EC2 API func (i *awsInstance) describeInstance() (*ec2.Instance, error) { return describeInstance(i.ec2, awsInstanceID(i.awsID)) @@ -1614,14 +1601,10 @@ func (i *awsInstance) describeInstance() (*ec2.Instance, error) { func (c *Cloud) getMountDevice( i *awsInstance, info *ec2.Instance, - volumeID awsVolumeID, + volumeID EBSVolumeID, assign bool) (assigned mountDevice, alreadyAttached bool, err error) { - instanceType := i.getInstanceType() - if instanceType == nil { - return "", false, fmt.Errorf("could not get instance type for instance: %s", i.awsID) - } - deviceMappings := map[mountDevice]awsVolumeID{} + deviceMappings := map[mountDevice]EBSVolumeID{} for _, blockDevice := range info.BlockDeviceMappings { name := aws.StringValue(blockDevice.DeviceName) if strings.HasPrefix(name, "/dev/sd") { @@ -1631,9 +1614,9 @@ func (c *Cloud) getMountDevice( name = name[8:] } if len(name) < 1 || len(name) > 2 { - glog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName)) + klog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName)) } - deviceMappings[mountDevice(name)] = awsVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) + deviceMappings[mountDevice(name)] = EBSVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) } // We lock to prevent concurrent mounts from conflicting @@ -1650,7 +1633,7 @@ func (c *Cloud) getMountDevice( for mountDevice, mappingVolumeID := range deviceMappings { if volumeID == mappingVolumeID { if assign { - glog.Warningf("Got assignment call for already-assigned volume: %s@%s", mountDevice, mappingVolumeID) + klog.Warningf("Got assignment call for already-assigned volume: %s@%s", mountDevice, mappingVolumeID) } return mountDevice, true, nil } @@ -1675,24 +1658,24 @@ func (c *Cloud) getMountDevice( chosen, err := deviceAllocator.GetNext(deviceMappings) if err != nil { - glog.Warningf("Could not assign a mount device. mappings=%v, error: %v", deviceMappings, err) - return "", false, fmt.Errorf("Too many EBS volumes attached to node %s.", i.nodeName) + klog.Warningf("Could not assign a mount device. 
mappings=%v, error: %v", deviceMappings, err) + return "", false, fmt.Errorf("too many EBS volumes attached to node %s", i.nodeName) } attaching := c.attaching[i.nodeName] if attaching == nil { - attaching = make(map[mountDevice]awsVolumeID) + attaching = make(map[mountDevice]EBSVolumeID) c.attaching[i.nodeName] = attaching } attaching[chosen] = volumeID - glog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID) + klog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID) return chosen, false, nil } // endAttaching removes the entry from the "attachments in progress" map // It returns true if it was found (and removed), false otherwise -func (c *Cloud) endAttaching(i *awsInstance, volumeID awsVolumeID, mountDevice mountDevice) bool { +func (c *Cloud) endAttaching(i *awsInstance, volumeID EBSVolumeID, mountDevice mountDevice) bool { c.attachingMutex.Lock() defer c.attachingMutex.Unlock() @@ -1705,10 +1688,10 @@ func (c *Cloud) endAttaching(i *awsInstance, volumeID awsVolumeID, mountDevice m // attached to the instance (as reported by the EC2 API). So if endAttaching comes after // a 10 second poll delay, we might well have had a concurrent request to allocate a mountpoint, // which because we allocate sequentially is _very_ likely to get the immediately freed volume - glog.Infof("endAttaching on device %q assigned to different volume: %q vs %q", mountDevice, volumeID, existingVolumeID) + klog.Infof("endAttaching on device %q assigned to different volume: %q vs %q", mountDevice, volumeID, existingVolumeID) return false } - glog.V(2).Infof("Releasing in-process attachment entry: %s -> volume %s", mountDevice, volumeID) + klog.V(2).Infof("Releasing in-process attachment entry: %s -> volume %s", mountDevice, volumeID) delete(c.attaching[i.nodeName], mountDevice) return true } @@ -1719,7 +1702,7 @@ type awsDisk struct { // Name in k8s name KubernetesVolumeID // id in AWS - awsID awsVolumeID + awsID EBSVolumeID } func newAWSDisk(aws *Cloud, name KubernetesVolumeID) (*awsDisk, error) { @@ -1832,7 +1815,7 @@ func (d *awsDisk) modifyVolume(requestGiB int64) (int64, error) { func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) { node, fetchErr := c.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) if fetchErr != nil { - glog.Errorf("Error fetching node %s with %v", nodeName, fetchErr) + klog.Errorf("Error fetching node %s with %v", nodeName, fetchErr) return } @@ -1843,7 +1826,7 @@ func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) } err := controller.AddOrUpdateTaintOnNode(c.kubeClient, string(nodeName), taint) if err != nil { - glog.Errorf("Error applying taint to node %s with error %v", nodeName, err) + klog.Errorf("Error applying taint to node %s with error %v", nodeName, err) return } c.eventRecorder.Eventf(node, v1.EventTypeWarning, volumeAttachmentStuck, reason) @@ -1871,7 +1854,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, if isAWSErrorVolumeNotFound(err) { if status == "detached" { // The disk doesn't exist, assume it's detached, log warning and stop waiting - glog.Warningf("Waiting for volume %q to be detached but the volume does not exist", d.awsID) + klog.Warningf("Waiting for volume %q to be detached but the volume does not exist", d.awsID) stateStr := "detached" attachment = &ec2.VolumeAttachment{ State: &stateStr, @@ -1880,7 +1863,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, } if status == 
"attached" { // The disk doesn't exist, complain, give up waiting and report error - glog.Warningf("Waiting for volume %q to be attached but the volume does not exist", d.awsID) + klog.Warningf("Waiting for volume %q to be attached but the volume does not exist", d.awsID) return false, err } } @@ -1888,29 +1871,30 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, if describeErrorCount > volumeAttachmentStatusConsecutiveErrorLimit { // report the error return false, err - } else { - glog.Warningf("Ignoring error from describe volume for volume %q; will retry: %q", d.awsID, err) - return false, nil } - } else { - describeErrorCount = 0 + + klog.Warningf("Ignoring error from describe volume for volume %q; will retry: %q", d.awsID, err) + return false, nil } + + describeErrorCount = 0 + if len(info.Attachments) > 1 { // Shouldn't happen; log so we know if it is - glog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) + klog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) } attachmentStatus := "" for _, a := range info.Attachments { if attachmentStatus != "" { // Shouldn't happen; log so we know if it is - glog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) + klog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) } if a.State != nil { attachment = a attachmentStatus = *a.State } else { // Shouldn't happen; log so we know if it is - glog.Warningf("Ignoring nil attachment state for volume %q: %v", d.awsID, a) + klog.Warningf("Ignoring nil attachment state for volume %q: %v", d.awsID, a) } } if attachmentStatus == "" { @@ -1921,7 +1905,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, return true, nil } // continue waiting - glog.V(2).Infof("Waiting for volume %q state: actual=%s, desired=%s", d.awsID, attachmentStatus, status) + klog.V(2).Infof("Waiting for volume %q state: actual=%s, desired=%s", d.awsID, attachmentStatus, status) return false, nil }) @@ -1972,23 +1956,6 @@ func (c *Cloud) buildSelfAWSInstance() (*awsInstance, error) { return newAWSInstance(c.ec2, instance), nil } -// Gets the awsInstance with for the node with the specified nodeName, or the 'self' instance if nodeName == "" -func (c *Cloud) getAwsInstance(nodeName types.NodeName) (*awsInstance, error) { - var awsInstance *awsInstance - if nodeName == "" { - awsInstance = c.selfAWSInstance - } else { - instance, err := c.getInstanceByNodeName(nodeName) - if err != nil { - return nil, err - } - - awsInstance = newAWSInstance(c.ec2, instance) - } - - return awsInstance, nil -} - // wrapAttachError wraps the error returned by an AttachVolume request with // additional information, if needed and possible. 
func wrapAttachError(err error, disk *awsDisk, instance string) error { @@ -1996,11 +1963,11 @@ func wrapAttachError(err error, disk *awsDisk, instance string) error { if awsError.Code() == "VolumeInUse" { info, err := disk.describeVolume() if err != nil { - glog.Errorf("Error describing volume %q: %q", disk.awsID, err) + klog.Errorf("Error describing volume %q: %q", disk.awsID, err) } else { for _, a := range info.Attachments { - if disk.awsID != awsVolumeID(aws.StringValue(a.VolumeId)) { - glog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId)) + if disk.awsID != EBSVolumeID(aws.StringValue(a.VolumeId)) { + klog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId)) } else if aws.StringValue(a.State) == "attached" { return fmt.Errorf("Error attaching EBS volume %q to instance %q: %q. The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId)) } @@ -2034,7 +2001,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) defer func() { if attachEnded { if !c.endAttaching(awsInstance, disk.awsID, mountDevice) { - glog.Errorf("endAttaching called for disk %q when attach not in progress", disk.awsID) + klog.Errorf("endAttaching called for disk %q when attach not in progress", disk.awsID) } } }() @@ -2053,7 +2020,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) if !alreadyAttached { available, err := c.checkIfAvailable(disk, "attaching", awsInstance.awsID) if err != nil { - glog.Error(err) + klog.Error(err) } if !available { @@ -2075,7 +2042,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) if da, ok := c.deviceAllocators[awsInstance.nodeName]; ok { da.Deprioritize(mountDevice) } - glog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse) + klog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse) } attachment, err := disk.waitForAttachmentStatus("attached") @@ -2113,15 +2080,15 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) if err != nil { if isAWSErrorVolumeNotFound(err) { // Someone deleted the volume being detached; complain, but do nothing else and return success - glog.Warningf("DetachDisk %s called for node %s but volume does not exist; assuming the volume is detached", diskName, nodeName) + klog.Warningf("DetachDisk %s called for node %s but volume does not exist; assuming the volume is detached", diskName, nodeName) return "", nil - } else { - return "", err } + + return "", err } if !attached && diskInfo.ec2Instance != nil { - glog.Warningf("DetachDisk %s called for node %s but volume is attached to node %s", diskName, nodeName, diskInfo.nodeName) + klog.Warningf("DetachDisk %s called for node %s but volume is attached to node %s", diskName, nodeName, diskInfo.nodeName) return "", nil } @@ -2137,7 +2104,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) } if !alreadyAttached { - glog.Warningf("DetachDisk called on non-attached disk: %s", diskName) + klog.Warningf("DetachDisk called on non-attached disk: %s", diskName) // TODO: Continue? Tolerate non-attached error from the AWS DetachVolume call? 
} @@ -2164,7 +2131,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) } if attachment != nil { // We expect it to be nil, it is (maybe) interesting if it is not - glog.V(2).Infof("waitForAttachmentStatus returned non-nil attachment with state=detached: %v", attachment) + klog.V(2).Infof("waitForAttachmentStatus returned non-nil attachment with state=detached: %v", attachment) } if mountDevice != "" { @@ -2208,47 +2175,50 @@ func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, er return "", fmt.Errorf("invalid AWS VolumeType %q", volumeOptions.VolumeType) } - // TODO: Should we tag this with the cluster id (so it gets deleted when the cluster does?) request := &ec2.CreateVolumeInput{} request.AvailabilityZone = aws.String(volumeOptions.AvailabilityZone) request.Size = aws.Int64(int64(volumeOptions.CapacityGB)) request.VolumeType = aws.String(createType) request.Encrypted = aws.Bool(volumeOptions.Encrypted) - if len(volumeOptions.KmsKeyId) > 0 { - request.KmsKeyId = aws.String(volumeOptions.KmsKeyId) + if len(volumeOptions.KmsKeyID) > 0 { + request.KmsKeyId = aws.String(volumeOptions.KmsKeyID) request.Encrypted = aws.Bool(true) } if iops > 0 { request.Iops = aws.Int64(iops) } + + tags := volumeOptions.Tags + tags = c.tagging.buildTags(ResourceLifecycleOwned, tags) + + var tagList []*ec2.Tag + for k, v := range tags { + tagList = append(tagList, &ec2.Tag{ + Key: aws.String(k), Value: aws.String(v), + }) + } + request.TagSpecifications = append(request.TagSpecifications, &ec2.TagSpecification{ + Tags: tagList, + ResourceType: aws.String(ec2.ResourceTypeVolume), + }) + response, err := c.ec2.CreateVolume(request) if err != nil { return "", err } - awsID := awsVolumeID(aws.StringValue(response.VolumeId)) + awsID := EBSVolumeID(aws.StringValue(response.VolumeId)) if awsID == "" { return "", fmt.Errorf("VolumeID was not returned by CreateVolume") } volumeName := KubernetesVolumeID("aws://" + aws.StringValue(response.AvailabilityZone) + "/" + string(awsID)) - // apply tags - if err := c.tagging.createTags(c.ec2, string(awsID), ResourceLifecycleOwned, volumeOptions.Tags); err != nil { - // delete the volume and hope it succeeds - _, delerr := c.DeleteDisk(volumeName) - if delerr != nil { - // delete did not succeed, we have a stray volume! - return "", fmt.Errorf("error tagging volume %s, could not delete the volume: %q", volumeName, delerr) - } - return "", fmt.Errorf("error tagging volume %s: %q", volumeName, err) - } - // AWS has a bad habbit of reporting success when creating a volume with // encryption keys that either don't exists or have wrong permissions. // Such volume lives for couple of seconds and then it's silently deleted // by AWS. There is no other check to ensure that given KMS key is correct, // because Kubernetes may have limited permissions to the key. 
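The CreateDisk hunk above replaces the old create-then-tag flow (which had to delete the freshly created volume if the follow-up tagging call failed) with tags passed atomically via TagSpecifications on the CreateVolumeInput. A sketch of what that request construction looks like with the AWS SDK is below; the tag keys, values, zone, and size are made up for illustration, and no API call is made.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Hypothetical tags standing in for the cluster-lifecycle tags the
	// provider builds via c.tagging.buildTags(ResourceLifecycleOwned, ...).
	tags := map[string]string{
		"kubernetes.io/cluster/example": "owned",
		"Name":                          "example-volume",
	}

	var tagList []*ec2.Tag
	for k, v := range tags {
		tagList = append(tagList, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)})
	}

	// Tags ride along on the create request instead of a later CreateTags call.
	request := &ec2.CreateVolumeInput{
		AvailabilityZone: aws.String("us-east-1a"),
		Size:             aws.Int64(10),
		VolumeType:       aws.String("gp2"),
		TagSpecifications: []*ec2.TagSpecification{{
			ResourceType: aws.String(ec2.ResourceTypeVolume),
			Tags:         tagList,
		}},
	}
	fmt.Println(request)
}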
- if len(volumeOptions.KmsKeyId) > 0 { + if len(volumeOptions.KmsKeyID) > 0 { err := c.waitUntilVolumeAvailable(volumeName) if err != nil { if isAWSErrorVolumeNotFound(err) { @@ -2298,10 +2268,10 @@ func (c *Cloud) DeleteDisk(volumeName KubernetesVolumeID) (bool, error) { available, err := c.checkIfAvailable(awsDisk, "deleting", "") if err != nil { if isAWSErrorVolumeNotFound(err) { - glog.V(2).Infof("Volume %s not found when deleting it, assuming it's deleted", awsDisk.awsID) + klog.V(2).Infof("Volume %s not found when deleting it, assuming it's deleted", awsDisk.awsID) return false, nil } - glog.Error(err) + klog.Error(err) } if !available { @@ -2315,7 +2285,7 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) info, err := disk.describeVolume() if err != nil { - glog.Errorf("Error describing volume %q: %q", disk.awsID, err) + klog.Errorf("Error describing volume %q: %q", disk.awsID, err) // if for some reason we can not describe volume we will return error return false, err } @@ -2331,11 +2301,11 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) // Volume is attached somewhere else and we can not attach it here if len(info.Attachments) > 0 { attachment := info.Attachments[0] - instanceId := aws.StringValue(attachment.InstanceId) - attachedInstance, ierr := c.getInstanceByID(instanceId) - attachErr := fmt.Sprintf("%s since volume is currently attached to %q", opError, instanceId) + instanceID := aws.StringValue(attachment.InstanceId) + attachedInstance, ierr := c.getInstanceByID(instanceID) + attachErr := fmt.Sprintf("%s since volume is currently attached to %q", opError, instanceID) if ierr != nil { - glog.Error(attachErr) + klog.Error(attachErr) return false, errors.New(attachErr) } devicePath := aws.StringValue(attachment.Device) @@ -2352,6 +2322,7 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) return true, nil } +// GetLabelsForVolume gets the volume labels for a volume func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { // Ignore any volumes that are being provisioned if pv.Spec.AWSElasticBlockStore.VolumeID == volume.ProvisionedVolumeName { @@ -2415,16 +2386,18 @@ func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeN if err != nil { if isAWSErrorVolumeNotFound(err) { // The disk doesn't exist, can't be attached - glog.Warningf("DiskIsAttached called for volume %s on node %s but the volume does not exist", diskName, nodeName) + klog.Warningf("DiskIsAttached called for volume %s on node %s but the volume does not exist", diskName, nodeName) return false, nil - } else { - return true, err } + + return true, err } return attached, nil } +// DisksAreAttached returns a map of nodes and Kubernetes volume IDs indicating +// if the volumes are attached to the node func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolumeID) (map[types.NodeName]map[KubernetesVolumeID]bool, error) { attached := make(map[types.NodeName]map[KubernetesVolumeID]bool) @@ -2450,7 +2423,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume } if len(awsInstances) == 0 { - glog.V(2).Infof("DisksAreAttached found no instances matching node names; will assume disks not attached") + klog.V(2).Infof("DisksAreAttached found no instances matching node names; will assume disks not attached") return attached, nil } @@ -2473,7 +2446,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks 
map[types.NodeName][]KubernetesVolume continue } - idToDiskName := make(map[awsVolumeID]KubernetesVolumeID) + idToDiskName := make(map[EBSVolumeID]KubernetesVolumeID) for _, diskName := range diskNames { volumeID, err := diskName.MapToAWSVolumeID() if err != nil { @@ -2483,7 +2456,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume } for _, blockDevice := range awsInstance.BlockDeviceMappings { - volumeID := awsVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) + volumeID := EBSVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) diskName, found := idToDiskName[volumeID] if found { // Disk is still attached to node @@ -2495,6 +2468,8 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume return attached, nil } +// ResizeDisk resizes an EBS volume in GiB increments, it will round up to the +// next GiB if arguments are not provided in even GiB increments func (c *Cloud) ResizeDisk( diskName KubernetesVolumeID, oldSize resource.Quantity, @@ -2543,7 +2518,7 @@ func (c *Cloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, var ret *elb.LoadBalancerDescription for _, loadBalancer := range response.LoadBalancerDescriptions { if ret != nil { - glog.Errorf("Found multiple load balancers with name: %s", name) + klog.Errorf("Found multiple load balancers with name: %s", name) } ret = loadBalancer } @@ -2628,7 +2603,7 @@ func (c *Cloud) findSecurityGroup(securityGroupID string) (*ec2.SecurityGroup, e groups, err := c.ec2.DescribeSecurityGroups(describeSecurityGroupsRequest) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return nil, err } @@ -2675,7 +2650,7 @@ func ipPermissionExists(newPermission, existing *ec2.IpPermission, compareGroupU } // Check only if newPermission is a subset of existing. Usually it has zero or one elements. // Not doing actual CIDR math yet; not clear it's needed, either. - glog.V(4).Infof("Comparing %v to %v", newPermission, existing) + klog.V(4).Infof("Comparing %v to %v", newPermission, existing) if len(newPermission.IpRanges) > len(existing.IpRanges) { return false } @@ -2710,7 +2685,7 @@ func ipPermissionExists(newPermission, existing *ec2.IpPermission, compareGroupU } func isEqualUserGroupPair(l, r *ec2.UserIdGroupPair, compareGroupUserIDs bool) bool { - glog.V(2).Infof("Comparing %v to %v", *l.GroupId, *r.GroupId) + klog.V(2).Infof("Comparing %v to %v", *l.GroupId, *r.GroupId) if isEqualStringPointer(l.GroupId, r.GroupId) { if compareGroupUserIDs { if isEqualStringPointer(l.UserId, r.UserId) { @@ -2735,7 +2710,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group %q", err) + klog.Warningf("Error retrieving security group %q", err) return false, err } @@ -2743,7 +2718,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe return false, fmt.Errorf("security group not found: %s", securityGroupID) } - glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) + klog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) actual := NewIPPermissionSet(group.IpPermissions...) @@ -2774,7 +2749,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe // don't want to accidentally open more than intended while we're // applying changes. 
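The setSecurityGroupIngress hunk continuing below reconciles the desired ingress rules against what the security group currently allows, computing separate add and remove sets. A minimal sketch of that diffing step follows; rules are reduced to plain strings purely for illustration, whereas the real code diffs *ec2.IpPermission values through an IPPermissionSet.

package main

import "fmt"

// diff returns the rules to authorize (in desired but not actual) and the
// rules to revoke (in actual but not desired).
func diff(desired, actual map[string]bool) (add, remove []string) {
	for rule := range desired {
		if !actual[rule] {
			add = append(add, rule)
		}
	}
	for rule := range actual {
		if !desired[rule] {
			remove = append(remove, rule)
		}
	}
	return add, remove
}

func main() {
	desired := map[string]bool{"tcp:443 from sg-lb": true, "tcp:80 from sg-lb": true}
	actual := map[string]bool{"tcp:80 from sg-lb": true, "tcp:22 from 0.0.0.0/0": true}
	add, remove := diff(desired, actual)
	fmt.Println("authorize:", add)
	fmt.Println("revoke:", remove)
}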
if add.Len() != 0 { - glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, add.List()) + klog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, add.List()) request := &ec2.AuthorizeSecurityGroupIngressInput{} request.GroupId = &securityGroupID @@ -2785,7 +2760,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe } } if remove.Len() != 0 { - glog.V(2).Infof("Remove security group ingress: %s %v", securityGroupID, remove.List()) + klog.V(2).Infof("Remove security group ingress: %s %v", securityGroupID, remove.List()) request := &ec2.RevokeSecurityGroupIngressInput{} request.GroupId = &securityGroupID @@ -2810,7 +2785,7 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return false, err } @@ -2818,7 +2793,7 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ return false, fmt.Errorf("security group not found: %s", securityGroupID) } - glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) + klog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) changes := []*ec2.IpPermission{} for _, addPermission := range addPermissions { @@ -2846,14 +2821,14 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ return false, nil } - glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, changes) + klog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, changes) request := &ec2.AuthorizeSecurityGroupIngressInput{} request.GroupId = &securityGroupID request.IpPermissions = changes _, err = c.ec2.AuthorizeSecurityGroupIngress(request) if err != nil { - glog.Warningf("Error authorizing security group ingress %q", err) + klog.Warningf("Error authorizing security group ingress %q", err) return false, fmt.Errorf("error authorizing security group ingress: %q", err) } @@ -2871,12 +2846,12 @@ func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermiss group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return false, err } if group == nil { - glog.Warning("Security group not found: ", securityGroupID) + klog.Warning("Security group not found: ", securityGroupID) return false, nil } @@ -2906,14 +2881,14 @@ func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermiss return false, nil } - glog.V(2).Infof("Removing security group ingress: %s %v", securityGroupID, changes) + klog.V(2).Infof("Removing security group ingress: %s %v", securityGroupID, changes) request := &ec2.RevokeSecurityGroupIngressInput{} request.GroupId = &securityGroupID request.IpPermissions = changes _, err = c.ec2.RevokeSecurityGroupIngress(request) if err != nil { - glog.Warningf("Error revoking security group ingress: %q", err) + klog.Warningf("Error revoking security group ingress: %q", err) return false, err } @@ -2949,7 +2924,7 @@ func (c *Cloud) ensureSecurityGroup(name string, description string, additionalT if len(securityGroups) >= 1 { if len(securityGroups) > 1 { - glog.Warningf("Found multiple security groups with name: %q", name) + klog.Warningf("Found multiple security groups with name: %q", name) } 
err := c.tagging.readRepairClusterTags( c.ec2, aws.StringValue(securityGroups[0].GroupId), @@ -2972,12 +2947,12 @@ func (c *Cloud) ensureSecurityGroup(name string, description string, additionalT switch err := err.(type) { case awserr.Error: if err.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries { - glog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry") + klog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry") ignore = true } } if !ignore { - glog.Errorf("Error creating security group: %q", err) + klog.Errorf("Error creating security group: %q", err) return "", err } time.Sleep(1 * time.Second) @@ -3036,7 +3011,7 @@ func (c *Cloud) findSubnets() ([]*ec2.Subnet, error) { } // Fall back to the current instance subnets, if nothing is tagged - glog.Warningf("No tagged subnets found; will fall-back to the current subnet only. This is likely to be an error in a future version of k8s.") + klog.Warningf("No tagged subnets found; will fall-back to the current subnet only. This is likely to be an error in a future version of k8s.") request = &ec2.DescribeSubnetsInput{} filters = []*ec2.Filter{newEc2Filter("subnet-id", c.selfAWSInstance.subnetID)} @@ -3073,7 +3048,7 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { az := aws.StringValue(subnet.AvailabilityZone) id := aws.StringValue(subnet.SubnetId) if az == "" || id == "" { - glog.Warningf("Ignoring subnet with empty az/id: %v", subnet) + klog.Warningf("Ignoring subnet with empty az/id: %v", subnet) continue } @@ -3082,7 +3057,7 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { return nil, err } if !internalELB && !isPublic { - glog.V(2).Infof("Ignoring private subnet for public ELB %q", id) + klog.V(2).Infof("Ignoring private subnet for public ELB %q", id) continue } @@ -3113,12 +3088,12 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { // If we have two subnets for the same AZ we arbitrarily choose the one that is first lexicographically. // TODO: Should this be an error. if strings.Compare(*existing.SubnetId, *subnet.SubnetId) > 0 { - glog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *subnet.SubnetId, *existing.SubnetId, *subnet.SubnetId) + klog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *subnet.SubnetId, *existing.SubnetId, *subnet.SubnetId) subnetsByAZ[az] = subnet continue } - glog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *existing.SubnetId, *existing.SubnetId, *subnet.SubnetId) + klog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *existing.SubnetId, *existing.SubnetId, *subnet.SubnetId) continue } @@ -3147,7 +3122,7 @@ func isSubnetPublic(rt []*ec2.RouteTable, subnetID string) (bool, error) { for _, table := range rt { for _, assoc := range table.Associations { if aws.BoolValue(assoc.Main) == true { - glog.V(4).Infof("Assuming implicit use of main routing table %s for %s", + klog.V(4).Infof("Assuming implicit use of main routing table %s for %s", aws.StringValue(table.RouteTableId), subnetID) subnetTable = table break @@ -3206,7 +3181,8 @@ func getPortSets(annotation string) (ports *portSets) { // attached to ELB created by a service. List always consist of at least // 1 member which is an SG created for this service or a SG from the Global config. 
// Extra groups can be specified via annotation, as can extra tags for any -// new groups. +// new groups. The annotation "ServiceAnnotationLoadBalancerSecurityGroups" allows for +// setting the security groups specified. func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, loadBalancerName string, annotations map[string]string) ([]string, error) { var err error var securityGroupID string @@ -3219,11 +3195,24 @@ func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, load sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName) securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription, getLoadBalancerAdditionalTags(annotations)) if err != nil { - glog.Errorf("Error creating load balancer security group: %q", err) + klog.Errorf("Error creating load balancer security group: %q", err) return nil, err } } - sgList := []string{securityGroupID} + + sgList := []string{} + + for _, extraSG := range strings.Split(annotations[ServiceAnnotationLoadBalancerSecurityGroups], ",") { + extraSG = strings.TrimSpace(extraSG) + if len(extraSG) > 0 { + sgList = append(sgList, extraSG) + } + } + + // If no Security Groups have been specified with the ServiceAnnotationLoadBalancerSecurityGroups annotation, we add the default one. + if len(sgList) == 0 { + sgList = append(sgList, securityGroupID) + } for _, extraSG := range strings.Split(annotations[ServiceAnnotationLoadBalancerExtraSecurityGroups], ",") { extraSG = strings.TrimSpace(extraSG) @@ -3274,7 +3263,7 @@ func buildListener(port v1.ServicePort, annotations map[string]string, sslPorts // EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { annotations := apiService.Annotations - glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", + klog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, c.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, annotations) if apiService.Spec.SessionAffinity != v1.ServiceAffinityNone { @@ -3296,7 +3285,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS return nil, fmt.Errorf("Only TCP LoadBalancer is supported for AWS ELB") } if port.NodePort == 0 { - glog.Errorf("Ignoring port without NodePort defined: %v", port) + klog.Errorf("Ignoring port without NodePort defined: %v", port) continue } @@ -3334,7 +3323,9 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS // Determine if this is tagged as an Internal ELB internalELB := false internalAnnotation := apiService.Annotations[ServiceAnnotationLoadBalancerInternal] - if internalAnnotation != "" { + if internalAnnotation == "false" { + internalELB = false + } else if internalAnnotation != "" { internalELB = true } @@ -3351,7 +3342,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS // Find the subnets that the ELB will live in subnetIDs, err := c.findELBSubnets(internalELB) if err != nil { - glog.Errorf("Error listing subnets in VPC: %q", err) + klog.Errorf("Error listing subnets in VPC: %q", err) return nil, err } // Bail out early if there are no subnets @@ -3390,7 +3381,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS err = c.updateInstanceSecurityGroupsForNLB(v2Mappings, instances, loadBalancerName, 
sourceRangeCidrs) if err != nil { - glog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) + klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) return nil, err } @@ -3512,7 +3503,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS // Find the subnets that the ELB will live in subnetIDs, err := c.findELBSubnets(internalELB) if err != nil { - glog.Errorf("Error listing subnets in VPC: %q", err) + klog.Errorf("Error listing subnets in VPC: %q", err) return nil, err } @@ -3599,13 +3590,13 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS } if path, healthCheckNodePort := service.GetServiceHealthCheckPathPort(apiService); path != "" { - glog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path) + klog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path) err = c.ensureLoadBalancerHealthCheck(loadBalancer, "HTTP", healthCheckNodePort, path, annotations) if err != nil { return nil, fmt.Errorf("Failed to ensure health check for localized service %v on node port %v: %q", loadBalancerName, healthCheckNodePort, err) } } else { - glog.V(4).Infof("service %v does not need custom health checks", apiService.Name) + klog.V(4).Infof("service %v does not need custom health checks", apiService.Name) // We only configure a TCP health-check on the first port var tcpHealthCheckPort int32 for _, listener := range listeners { @@ -3624,17 +3615,17 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS err = c.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances) if err != nil { - glog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) + klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) return nil, err } err = c.ensureLoadBalancerInstances(aws.StringValue(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances) if err != nil { - glog.Warningf("Error registering instances with the load balancer: %q", err) + klog.Warningf("Error registering instances with the load balancer: %q", err) return nil, err } - glog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, aws.StringValue(loadBalancer.DNSName)) + klog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, aws.StringValue(loadBalancer.DNSName)) // TODO: Wait for creation? 
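The buildELBSecurityGroupList change earlier in this hunk makes the ServiceAnnotationLoadBalancerSecurityGroups annotation authoritative: if it names any groups, they are used instead of the default ELB security group; otherwise the default is used, and ServiceAnnotationLoadBalancerExtraSecurityGroups is appended in either case. The standalone sketch below mirrors that selection logic; the function names and default group ID are invented for illustration.

package main

import (
	"fmt"
	"strings"
)

// splitSGAnnotation trims whitespace and drops empty entries from a
// comma-separated annotation value.
func splitSGAnnotation(value string) []string {
	var out []string
	for _, sg := range strings.Split(value, ",") {
		if sg = strings.TrimSpace(sg); sg != "" {
			out = append(out, sg)
		}
	}
	return out
}

// buildSGList mirrors the new behavior: annotation-specified groups replace
// the default group, and extra groups are always appended.
func buildSGList(defaultSG, sgAnnotation, extraSGAnnotation string) []string {
	sgList := splitSGAnnotation(sgAnnotation)
	if len(sgList) == 0 {
		sgList = append(sgList, defaultSG)
	}
	return append(sgList, splitSGAnnotation(extraSGAnnotation)...)
}

func main() {
	fmt.Println(buildSGList("sg-default", "", "sg-extra1"))           // [sg-default sg-extra1]
	fmt.Println(buildSGList("sg-default", "sg-a, sg-b", "sg-extra1")) // [sg-a sg-b sg-extra1]
}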
@@ -3691,7 +3682,7 @@ func toStatus(lb *elb.LoadBalancerDescription) *v1.LoadBalancerStatus { func v2toStatus(lb *elbv2.LoadBalancer) *v1.LoadBalancerStatus { status := &v1.LoadBalancerStatus{} if lb == nil { - glog.Error("[BUG] v2toStatus got nil input, this is a Kubernetes bug, please report") + klog.Error("[BUG] v2toStatus got nil input, this is a Kubernetes bug, please report") return status } @@ -3718,7 +3709,7 @@ func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups m for _, group := range instance.SecurityGroups { groupID := aws.StringValue(group.GroupId) if groupID == "" { - glog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group) + klog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group) continue } _, isTagged := taggedSecurityGroups[groupID] @@ -3750,7 +3741,7 @@ func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups m return untagged[0], nil } - glog.Warningf("No security group found for instance %q", instanceID) + klog.Warningf("No security group found for instance %q", instanceID) return nil, nil } @@ -3771,7 +3762,7 @@ func (c *Cloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, error) id := aws.StringValue(group.GroupId) if id == "" { - glog.Warningf("Ignoring group without id: %v", group) + klog.Warningf("Ignoring group without id: %v", group) continue } m[id] = group @@ -3794,7 +3785,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer } if loadBalancerSecurityGroupID != "" { // We create LBs with one SG - glog.Warningf("Multiple security groups for load balancer: %q", aws.StringValue(lb.LoadBalancerName)) + klog.Warningf("Multiple security groups for load balancer: %q", aws.StringValue(lb.LoadBalancerName)) } loadBalancerSecurityGroupID = *securityGroup } @@ -3843,12 +3834,12 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer } if securityGroup == nil { - glog.Warning("Ignoring instance without security group: ", aws.StringValue(instance.InstanceId)) + klog.Warning("Ignoring instance without security group: ", aws.StringValue(instance.InstanceId)) continue } id := aws.StringValue(securityGroup.GroupId) if id == "" { - glog.Warningf("found security group without id: %v", securityGroup) + klog.Warningf("found security group without id: %v", securityGroup) continue } @@ -3859,7 +3850,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer for _, actualGroup := range actualGroups { actualGroupID := aws.StringValue(actualGroup.GroupId) if actualGroupID == "" { - glog.Warning("Ignoring group without ID: ", actualGroup) + klog.Warning("Ignoring group without ID: ", actualGroup) continue } @@ -3875,9 +3866,9 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer for instanceSecurityGroupID, add := range instanceSecurityGroupIds { if add { - glog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) } else { - glog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) } 
sourceGroupID := &ec2.UserIdGroupPair{} sourceGroupID.GroupId = &loadBalancerSecurityGroupID @@ -3896,7 +3887,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer return err } if !changed { - glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } else { changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, permissions) @@ -3904,7 +3895,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } } @@ -3922,7 +3913,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin return err } if lb == nil { - glog.Info("Load balancer already deleted: ", loadBalancerName) + klog.Info("Load balancer already deleted: ", loadBalancerName) return nil } @@ -4046,7 +4037,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", *matchingGroups[i].GroupId) + klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", *matchingGroups[i].GroupId) } } @@ -4064,7 +4055,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin } if lb == nil { - glog.Info("Load balancer already deleted: ", loadBalancerName) + klog.Info("Load balancer already deleted: ", loadBalancerName) return nil } @@ -4072,7 +4063,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin // De-authorize the load balancer security group from the instances security group err = c.updateInstanceSecurityGroupsForLoadBalancer(lb, nil) if err != nil { - glog.Errorf("Error deregistering load balancer from instance security groups: %q", err) + klog.Errorf("Error deregistering load balancer from instance security groups: %q", err) return err } } @@ -4085,7 +4076,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin _, err = c.elb.DeleteLoadBalancer(request) if err != nil { // TODO: Check if error was because load balancer was concurrently deleted - glog.Errorf("Error deleting load balancer: %q", err) + klog.Errorf("Error deleting load balancer: %q", err) return err } } @@ -4103,7 +4094,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin continue } if aws.StringValue(securityGroupID) == "" { - glog.Warning("Ignoring empty security group in ", service.Name) + klog.Warning("Ignoring empty security group in ", service.Name) continue } securityGroupIDs[*securityGroupID] = struct{}{} @@ -4122,7 +4113,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin ignore := false if awsError, ok := err.(awserr.Error); ok { if awsError.Code() == "DependencyViolation" { - glog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID) + klog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID) ignore = true } } @@ -4133,7 +4124,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx 
context.Context, clusterName strin } if len(securityGroupIDs) == 0 { - glog.V(2).Info("Deleted all security groups for load balancer: ", service.Name) + klog.V(2).Info("Deleted all security groups for load balancer: ", service.Name) break } @@ -4146,7 +4137,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin return fmt.Errorf("timed out deleting ELB: %s. Could not delete security groups %v", service.Name, strings.Join(ids, ",")) } - glog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name) + klog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name) time.Sleep(10 * time.Second) } @@ -4277,14 +4268,14 @@ func (c *Cloud) getInstancesByNodeNames(nodeNames []string, states ...string) ([ instances, err := c.describeInstances(filters) if err != nil { - glog.V(2).Infof("Failed to describe instances %v", nodeNames) + klog.V(2).Infof("Failed to describe instances %v", nodeNames) return nil, err } ec2Instances = append(ec2Instances, instances...) } if len(ec2Instances) == 0 { - glog.V(3).Infof("Failed to find any instances %v", nodeNames) + klog.V(3).Infof("Failed to find any instances %v", nodeNames) return nil, nil } return ec2Instances, nil diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go index 7743632723047..28946fa35c03b 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go @@ -25,10 +25,10 @@ import ( "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elbv2" "github.com/aws/aws-sdk-go/service/kms" - - "github.com/golang/glog" + "k8s.io/klog" ) +// FakeAWSServices is an fake AWS session used for testing type FakeAWSServices struct { region string instances []*ec2.Instance @@ -45,7 +45,8 @@ type FakeAWSServices struct { kms *FakeKMS } -func NewFakeAWSServices(clusterId string) *FakeAWSServices { +// NewFakeAWSServices creates a new FakeAWSServices +func NewFakeAWSServices(clusterID string) *FakeAWSServices { s := &FakeAWSServices{} s.region = "us-east-1" s.ec2 = &FakeEC2Impl{aws: s} @@ -71,12 +72,13 @@ func NewFakeAWSServices(clusterId string) *FakeAWSServices { var tag ec2.Tag tag.Key = aws.String(TagNameKubernetesClusterLegacy) - tag.Value = aws.String(clusterId) + tag.Value = aws.String(clusterID) selfInstance.Tags = []*ec2.Tag{&tag} return s } +// WithAz sets the ec2 placement availability zone func (s *FakeAWSServices) WithAz(az string) *FakeAWSServices { if s.selfInstance.Placement == nil { s.selfInstance.Placement = &ec2.Placement{} @@ -85,30 +87,37 @@ func (s *FakeAWSServices) WithAz(az string) *FakeAWSServices { return s } +// Compute returns a fake EC2 client func (s *FakeAWSServices) Compute(region string) (EC2, error) { return s.ec2, nil } +// LoadBalancing returns a fake ELB client func (s *FakeAWSServices) LoadBalancing(region string) (ELB, error) { return s.elb, nil } +// LoadBalancingV2 returns a fake ELBV2 client func (s *FakeAWSServices) LoadBalancingV2(region string) (ELBV2, error) { return s.elbv2, nil } +// Autoscaling returns a fake ASG client func (s *FakeAWSServices) Autoscaling(region string) (ASG, error) { return s.asg, nil } +// Metadata returns a fake EC2Metadata client func (s *FakeAWSServices) Metadata() (EC2Metadata, error) { return s.metadata, nil } +// KeyManagement returns a fake 
KMS client func (s *FakeAWSServices) KeyManagement(region string) (KMS, error) { return s.kms, nil } +// FakeEC2 is a fake EC2 client used for testing type FakeEC2 interface { EC2 CreateSubnet(*ec2.Subnet) (*ec2.CreateSubnetOutput, error) @@ -117,6 +126,7 @@ type FakeEC2 interface { RemoveRouteTables() } +// FakeEC2Impl is an implementation of the FakeEC2 interface used for testing type FakeEC2Impl struct { aws *FakeAWSServices Subnets []*ec2.Subnet @@ -125,12 +135,13 @@ type FakeEC2Impl struct { DescribeRouteTablesInput *ec2.DescribeRouteTablesInput } +// DescribeInstances returns fake instance descriptions func (ec2i *FakeEC2Impl) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) { matches := []*ec2.Instance{} for _, instance := range ec2i.aws.instances { if request.InstanceIds != nil { if instance.InstanceId == nil { - glog.Warning("Instance with no instance id: ", instance) + klog.Warning("Instance with no instance id: ", instance) continue } @@ -163,54 +174,73 @@ func (ec2i *FakeEC2Impl) DescribeInstances(request *ec2.DescribeInstancesInput) return matches, nil } +// AttachVolume is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) AttachVolume(request *ec2.AttachVolumeInput) (resp *ec2.VolumeAttachment, err error) { panic("Not implemented") } +// DetachVolume is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error) { panic("Not implemented") } +// DescribeVolumes is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error) { panic("Not implemented") } +// CreateVolume is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error) { panic("Not implemented") } +// DeleteVolume is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) DeleteVolume(request *ec2.DeleteVolumeInput) (resp *ec2.DeleteVolumeOutput, err error) { panic("Not implemented") } +// DescribeSecurityGroups is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) { panic("Not implemented") } +// CreateSecurityGroup is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) { panic("Not implemented") } +// DeleteSecurityGroup is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) { panic("Not implemented") } +// AuthorizeSecurityGroupIngress is not implemented but is required for +// interface conformance func (ec2i *FakeEC2Impl) AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) { panic("Not implemented") } +// RevokeSecurityGroupIngress is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) { panic("Not implemented") } +// DescribeVolumeModifications is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) 
DescribeVolumeModifications(*ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) { panic("Not implemented") } +// ModifyVolume is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) { panic("Not implemented") } +// CreateSubnet creates fake subnets func (ec2i *FakeEC2Impl) CreateSubnet(request *ec2.Subnet) (*ec2.CreateSubnetOutput, error) { ec2i.Subnets = append(ec2i.Subnets, request) response := &ec2.CreateSubnetOutput{ @@ -219,24 +249,29 @@ func (ec2i *FakeEC2Impl) CreateSubnet(request *ec2.Subnet) (*ec2.CreateSubnetOut return response, nil } +// DescribeSubnets returns fake subnet descriptions func (ec2i *FakeEC2Impl) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) { ec2i.DescribeSubnetsInput = request return ec2i.Subnets, nil } +// RemoveSubnets clears subnets on client func (ec2i *FakeEC2Impl) RemoveSubnets() { ec2i.Subnets = ec2i.Subnets[:0] } +// CreateTags is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) { panic("Not implemented") } +// DescribeRouteTables returns fake route table descriptions func (ec2i *FakeEC2Impl) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error) { ec2i.DescribeRouteTablesInput = request return ec2i.RouteTables, nil } +// CreateRouteTable creates fake route tables func (ec2i *FakeEC2Impl) CreateRouteTable(request *ec2.RouteTable) (*ec2.CreateRouteTableOutput, error) { ec2i.RouteTables = append(ec2i.RouteTables, request) response := &ec2.CreateRouteTableOutput{ @@ -245,30 +280,38 @@ func (ec2i *FakeEC2Impl) CreateRouteTable(request *ec2.RouteTable) (*ec2.CreateR return response, nil } +// RemoveRouteTables clears route tables on client func (ec2i *FakeEC2Impl) RemoveRouteTables() { ec2i.RouteTables = ec2i.RouteTables[:0] } +// CreateRoute is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) { panic("Not implemented") } +// DeleteRoute is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) { panic("Not implemented") } +// ModifyInstanceAttribute is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) { panic("Not implemented") } +// DescribeVpcs returns fake VPC descriptions func (ec2i *FakeEC2Impl) DescribeVpcs(request *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) { return &ec2.DescribeVpcsOutput{Vpcs: []*ec2.Vpc{{CidrBlock: aws.String("172.20.0.0/16")}}}, nil } +// FakeMetadata is a fake EC2 metadata service client used for testing type FakeMetadata struct { aws *FakeAWSServices } +// GetMetadata returns fake EC2 metadata for testing func (m *FakeMetadata) GetMetadata(key string) (string, error) { networkInterfacesPrefix := "network/interfaces/macs/" i := m.aws.selfInstance @@ -291,199 +334,292 @@ func (m *FakeMetadata) GetMetadata(key string) (string, error) { } else if strings.HasPrefix(key, networkInterfacesPrefix) { if key == networkInterfacesPrefix { return strings.Join(m.aws.networkInterfacesMacs, "/\n") + "/\n", nil - } else { - keySplit := strings.Split(key, "/") - macParam := keySplit[3] - if len(keySplit) == 
5 && keySplit[4] == "vpc-id" { - for i, macElem := range m.aws.networkInterfacesMacs { - if macParam == macElem { - return m.aws.networkInterfacesVpcIDs[i], nil - } + } + + keySplit := strings.Split(key, "/") + macParam := keySplit[3] + if len(keySplit) == 5 && keySplit[4] == "vpc-id" { + for i, macElem := range m.aws.networkInterfacesMacs { + if macParam == macElem { + return m.aws.networkInterfacesVpcIDs[i], nil } } - if len(keySplit) == 5 && keySplit[4] == "local-ipv4s" { - for i, macElem := range m.aws.networkInterfacesMacs { - if macParam == macElem { - return strings.Join(m.aws.networkInterfacesPrivateIPs[i], "/\n"), nil - } + } + if len(keySplit) == 5 && keySplit[4] == "local-ipv4s" { + for i, macElem := range m.aws.networkInterfacesMacs { + if macParam == macElem { + return strings.Join(m.aws.networkInterfacesPrivateIPs[i], "/\n"), nil } } - return "", nil } + + return "", nil } else { return "", nil } } +// FakeELB is a fake ELB client used for testing type FakeELB struct { aws *FakeAWSServices } +// CreateLoadBalancer is not implemented but is required for interface +// conformance func (elb *FakeELB) CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error) { panic("Not implemented") } +// DeleteLoadBalancer is not implemented but is required for interface +// conformance func (elb *FakeELB) DeleteLoadBalancer(input *elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error) { panic("Not implemented") } +// DescribeLoadBalancers is not implemented but is required for interface +// conformance func (elb *FakeELB) DescribeLoadBalancers(input *elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error) { panic("Not implemented") } +// AddTags is not implemented but is required for interface conformance func (elb *FakeELB) AddTags(input *elb.AddTagsInput) (*elb.AddTagsOutput, error) { panic("Not implemented") } +// RegisterInstancesWithLoadBalancer is not implemented but is required for +// interface conformance func (elb *FakeELB) RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error) { panic("Not implemented") } +// DeregisterInstancesFromLoadBalancer is not implemented but is required for +// interface conformance func (elb *FakeELB) DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error) { panic("Not implemented") } +// DetachLoadBalancerFromSubnets is not implemented but is required for +// interface conformance func (elb *FakeELB) DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error) { panic("Not implemented") } +// AttachLoadBalancerToSubnets is not implemented but is required for interface +// conformance func (elb *FakeELB) AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error) { panic("Not implemented") } +// CreateLoadBalancerListeners is not implemented but is required for interface +// conformance func (elb *FakeELB) CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error) { panic("Not implemented") } +// DeleteLoadBalancerListeners is not implemented but is required for interface +// conformance func (elb *FakeELB) DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error) { panic("Not implemented") } +// 
ApplySecurityGroupsToLoadBalancer is not implemented but is required for +// interface conformance func (elb *FakeELB) ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error) { panic("Not implemented") } +// ConfigureHealthCheck is not implemented but is required for interface +// conformance func (elb *FakeELB) ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error) { panic("Not implemented") } +// CreateLoadBalancerPolicy is not implemented but is required for interface +// conformance func (elb *FakeELB) CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error) { panic("Not implemented") } +// SetLoadBalancerPoliciesForBackendServer is not implemented but is required +// for interface conformance func (elb *FakeELB) SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error) { panic("Not implemented") } +// SetLoadBalancerPoliciesOfListener is not implemented but is required for +// interface conformance func (elb *FakeELB) SetLoadBalancerPoliciesOfListener(input *elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error) { panic("Not implemented") } +// DescribeLoadBalancerPolicies is not implemented but is required for +// interface conformance func (elb *FakeELB) DescribeLoadBalancerPolicies(input *elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error) { panic("Not implemented") } +// DescribeLoadBalancerAttributes is not implemented but is required for +// interface conformance func (elb *FakeELB) DescribeLoadBalancerAttributes(*elb.DescribeLoadBalancerAttributesInput) (*elb.DescribeLoadBalancerAttributesOutput, error) { panic("Not implemented") } +// ModifyLoadBalancerAttributes is not implemented but is required for +// interface conformance func (elb *FakeELB) ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error) { panic("Not implemented") } -func (self *FakeELB) expectDescribeLoadBalancers(loadBalancerName string) { +// expectDescribeLoadBalancers is not implemented but is required for interface +// conformance +func (elb *FakeELB) expectDescribeLoadBalancers(loadBalancerName string) { panic("Not implemented") } +// FakeELBV2 is a fake ELBV2 client used for testing type FakeELBV2 struct { aws *FakeAWSServices } -func (self *FakeELBV2) AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error) { +// AddTags is not implemented but is required for interface conformance +func (elb *FakeELBV2) AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error) { +// CreateLoadBalancer is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error) { + +// DescribeLoadBalancers is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error) { panic("Not implemented") } 
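The aws_fakes.go changes here are mostly added doc comments and renaming the self receiver to a conventional short name. The underlying pattern, a test fake that satisfies a large SDK interface by panicking in every method a test does not exercise, looks roughly like this in miniature; the interface and method names below are invented for illustration and are not the provider's.

package main

import "fmt"

// Volumes is a stand-in for a large cloud SDK interface.
type Volumes interface {
	CreateVolume(name string) (string, error)
	DeleteVolume(id string) error
}

// FakeVolumes implements Volumes for tests; unexercised methods panic so
// unexpected calls surface immediately.
type FakeVolumes struct {
	Created []string
}

// Compile-time check that the fake satisfies the interface.
var _ Volumes = &FakeVolumes{}

// CreateVolume records the request and returns a canned ID.
func (f *FakeVolumes) CreateVolume(name string) (string, error) {
	f.Created = append(f.Created, name)
	return "vol-123", nil
}

// DeleteVolume is not implemented but is required for interface conformance.
func (f *FakeVolumes) DeleteVolume(id string) error {
	panic("Not implemented")
}

func main() {
	var v Volumes = &FakeVolumes{}
	id, _ := v.CreateVolume("test")
	fmt.Println(id)
}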
-func (self *FakeELBV2) DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error) { + +// DeleteLoadBalancer is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error) { +// ModifyLoadBalancerAttributes is not implemented but is required for +// interface conformance +func (elb *FakeELBV2) ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error) { + +// DescribeLoadBalancerAttributes is not implemented but is required for +// interface conformance +func (elb *FakeELBV2) DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error) { +// CreateTargetGroup is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error) { + +// DescribeTargetGroups is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error) { + +// ModifyTargetGroup is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error) { + +// DeleteTargetGroup is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error) { +// DescribeTargetHealth is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error) { +// DescribeTargetGroupAttributes is not implemented but is required for +// interface conformance +func (elb *FakeELBV2) DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error) { 
+ +// ModifyTargetGroupAttributes is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) { +// RegisterTargets is not implemented but is required for interface conformance +func (elb *FakeELBV2) RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) { + +// DeregisterTargets is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) { +// CreateListener is not implemented but is required for interface conformance +func (elb *FakeELBV2) CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error) { + +// DescribeListeners is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error) { + +// DeleteListener is not implemented but is required for interface conformance +func (elb *FakeELBV2) DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) { + +// ModifyListener is not implemented but is required for interface conformance +func (elb *FakeELBV2) ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error { +// WaitUntilLoadBalancersDeleted is not implemented but is required for +// interface conformance +func (elb *FakeELBV2) WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error { panic("Not implemented") } +// FakeASG is a fake Autoscaling client used for testing type FakeASG struct { aws *FakeAWSServices } +// UpdateAutoScalingGroup is not implemented but is required for interface +// conformance func (a *FakeASG) UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) { panic("Not implemented") } +// DescribeAutoScalingGroups is not implemented but is required for interface +// conformance func (a *FakeASG) DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) { panic("Not implemented") } +// FakeKMS is a fake KMS client used for testing type FakeKMS struct { aws *FakeAWSServices } +// DescribeKey is not implemented but is required for interface conformance func (kms *FakeKMS) DescribeKey(*kms.DescribeKeyInput) (*kms.DescribeKeyOutput, error) { panic("Not implemented") } diff --git 
a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go index 6bf63ae6fe667..6c4c59b039e57 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go @@ -21,7 +21,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/golang/glog" + "k8s.io/klog" ) // AWSCloud implements InstanceGroups @@ -42,7 +42,7 @@ func ResizeInstanceGroup(asg ASG, instanceGroupName string, size int) error { return nil } -// Implement InstanceGroups.ResizeInstanceGroup +// ResizeInstanceGroup implements InstanceGroups.ResizeInstanceGroup // Set the size to the fixed size func (c *Cloud) ResizeInstanceGroup(instanceGroupName string, size int) error { return ResizeInstanceGroup(c.asg, instanceGroupName, size) @@ -64,13 +64,13 @@ func DescribeInstanceGroup(asg ASG, instanceGroupName string) (InstanceGroupInfo return nil, nil } if len(response.AutoScalingGroups) > 1 { - glog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName) + klog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName) } group := response.AutoScalingGroups[0] return &awsInstanceGroup{group: group}, nil } -// Implement InstanceGroups.DescribeInstanceGroup +// DescribeInstanceGroup implements InstanceGroups.DescribeInstanceGroup // Queries the cloud provider for information about the specified instance group func (c *Cloud) DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error) { return DescribeInstanceGroup(c.asg, instanceGroupName) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index 25583ca569760..088c16133a510 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -28,15 +28,20 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" ) const ( + // ProxyProtocolPolicyName is the tag named used for the proxy protocol + // policy ProxyProtocolPolicyName = "k8s-proxyprotocol-enabled" + // SSLNegotiationPolicyNameFormat is a format string used for the SSL + // negotiation policy tag name SSLNegotiationPolicyNameFormat = "k8s-SSLNegotiationPolicy-%s" ) @@ -129,7 +134,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa }) } - glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) + klog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) createResponse, err := c.elbv2.CreateLoadBalancer(createRequest) if err != nil { return nil, fmt.Errorf("Error creating load balancer: %q", err) @@ -138,10 +143,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa loadBalancer = createResponse.LoadBalancers[0] // Create Target Groups - addTagsInput := &elbv2.AddTagsInput{ - ResourceArns: []*string{}, - Tags: []*elbv2.Tag{}, - } + resourceArns := make([]*string, 0, len(mappings)) for i := 
range mappings { // It is easier to keep track of updates by having possibly @@ -150,20 +152,28 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa if err != nil { return nil, fmt.Errorf("Error creating listener: %q", err) } - addTagsInput.ResourceArns = append(addTagsInput.ResourceArns, targetGroupArn) + resourceArns = append(resourceArns, targetGroupArn) } // Add tags to targets + targetGroupTags := make([]*elbv2.Tag, 0, len(tags)) + for k, v := range tags { - addTagsInput.Tags = append(addTagsInput.Tags, &elbv2.Tag{ + targetGroupTags = append(targetGroupTags, &elbv2.Tag{ Key: aws.String(k), Value: aws.String(v), }) } - if len(addTagsInput.ResourceArns) > 0 && len(addTagsInput.Tags) > 0 { - _, err = c.elbv2.AddTags(addTagsInput) - if err != nil { - return nil, fmt.Errorf("Error adding tags after creating Load Balancer: %q", err) + if len(resourceArns) > 0 && len(targetGroupTags) > 0 { + // elbv2.AddTags doesn't allow to tag multiple resources at once + for _, arn := range resourceArns { + _, err = c.elbv2.AddTags(&elbv2.AddTagsInput{ + ResourceArns: []*string{arn}, + Tags: targetGroupTags, + }) + if err != nil { + return nil, fmt.Errorf("Error adding tags after creating Load Balancer: %q", err) + } } } } else { @@ -335,7 +345,7 @@ func createTargetName(namespacedName types.NamespacedName, frontendPort, nodePor func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping, namespacedName types.NamespacedName, instanceIDs []string, vpcID string) (listener *elbv2.Listener, targetGroupArn *string, err error) { targetName := createTargetName(namespacedName, mapping.FrontendPort, mapping.TrafficPort) - glog.Infof("Creating load balancer target group for %v with name: %s", namespacedName, targetName) + klog.Infof("Creating load balancer target group for %v with name: %s", namespacedName, targetName) target, err := c.ensureTargetGroup( nil, mapping, @@ -356,7 +366,7 @@ func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping Type: aws.String(elbv2.ActionTypeEnumForward), }}, } - glog.Infof("Creating load balancer listener for %v", namespacedName) + klog.Infof("Creating load balancer listener for %v", namespacedName) createListenerOutput, err := c.elbv2.CreateListener(createListernerInput) if err != nil { return nil, aws.String(""), fmt.Errorf("Error creating load balancer listener: %q", err) @@ -564,12 +574,17 @@ func filterForIPRangeDescription(securityGroups []*ec2.SecurityGroup, lbName str response := []*ec2.SecurityGroup{} clientRule := fmt.Sprintf("%s=%s", NLBClientRuleDescription, lbName) healthRule := fmt.Sprintf("%s=%s", NLBHealthCheckRuleDescription, lbName) + alreadyAdded := sets.NewString() for i := range securityGroups { for j := range securityGroups[i].IpPermissions { for k := range securityGroups[i].IpPermissions[j].IpRanges { description := aws.StringValue(securityGroups[i].IpPermissions[j].IpRanges[k].Description) if description == clientRule || description == healthRule { - response = append(response, securityGroups[i]) + sgIDString := aws.StringValue(securityGroups[i].GroupId) + if !alreadyAdded.Has(sgIDString) { + response = append(response, securityGroups[i]) + alreadyAdded.Insert(sgIDString) + } } } } @@ -577,7 +592,7 @@ func filterForIPRangeDescription(securityGroups []*ec2.SecurityGroup, lbName str return response } -func (c *Cloud) getVpcCidrBlock() (*string, error) { +func (c *Cloud) getVpcCidrBlocks() ([]string, error) { vpcs, err := c.ec2.DescribeVpcs(&ec2.DescribeVpcsInput{ VpcIds: 
[]*string{aws.String(c.vpcID)}, }) @@ -587,13 +602,19 @@ func (c *Cloud) getVpcCidrBlock() (*string, error) { if len(vpcs.Vpcs) != 1 { return nil, fmt.Errorf("Error querying VPC for ELB, got %d vpcs for %s", len(vpcs.Vpcs), c.vpcID) } - return vpcs.Vpcs[0].CidrBlock, nil + + cidrBlocks := make([]string, 0, len(vpcs.Vpcs[0].CidrBlockAssociationSet)) + for _, cidr := range vpcs.Vpcs[0].CidrBlockAssociationSet { + cidrBlocks = append(cidrBlocks, aws.StringValue(cidr.CidrBlock)) + } + return cidrBlocks, nil } // abstraction for updating SG rules // if clientTraffic is false, then only update HealthCheck rules func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.SecurityGroup, desiredSgIds []string, ports []int64, lbName string, clientCidrs []string, clientTraffic bool) error { + klog.V(8).Infof("updateInstanceSecurityGroupsForNLBTraffic: actualGroups=%v, desiredSgIds=%v, ports=%v, clientTraffic=%v", actualGroups, desiredSgIds, ports, clientTraffic) // Map containing the groups we want to make changes on; the ports to make // changes on; and whether to add or remove it. true to add, false to remove portChanges := map[string]map[int64]bool{} @@ -612,7 +633,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se for _, actualGroup := range actualGroups { actualGroupID := aws.StringValue(actualGroup.GroupId) if actualGroupID == "" { - glog.Warning("Ignoring group without ID: ", actualGroup) + klog.Warning("Ignoring group without ID: ", actualGroup) continue } @@ -647,17 +668,17 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se for port, add := range portMap { if add { if clientTraffic { - glog.V(2).Infof("Adding rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) - glog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s), port (%v)", clientCidrs, instanceSecurityGroupID, port) } else { - glog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s), port (%v)", clientCidrs, instanceSecurityGroupID, port) } } else { if clientTraffic { - glog.V(2).Infof("Removing rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) - glog.V(2).Infof("Removing rule for client traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for client traffic from the network load balancer (%s) to instance (%s), port (%v)", clientCidrs, instanceSecurityGroupID, port) } - glog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to instance (%s), port (%v)", 
clientCidrs, instanceSecurityGroupID, port) } if clientTraffic { @@ -712,7 +733,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se return err } if !changed { - glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } @@ -722,7 +743,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } @@ -745,12 +766,12 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se group, err := c.findSecurityGroup(instanceSecurityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return err } if group == nil { - glog.Warning("Security group not found: ", instanceSecurityGroupID) + klog.Warning("Security group not found: ", instanceSecurityGroupID) return nil } @@ -771,21 +792,21 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se // the icmp permission is missing changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission}) if err != nil { - glog.Warningf("Error adding MTU permission to security group: %q", err) + klog.Warningf("Error adding MTU permission to security group: %q", err) return err } if !changed { - glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } else if icmpExists && permCount == 0 { // there is no additional permissions, remove icmp changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission}) if err != nil { - glog.Warningf("Error removing MTU permission to security group: %q", err) + klog.Warningf("Error removing MTU permission to security group: %q", err) return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Revoking ingress was not needed; concurrent change? 
groupId=", instanceSecurityGroupID) } } } @@ -799,7 +820,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLB(mappings []nlbPortMapping, in return nil } - vpcCidr, err := c.getVpcCidrBlock() + vpcCidrBlocks, err := c.getVpcCidrBlocks() if err != nil { return err } @@ -864,13 +885,13 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLB(mappings []nlbPortMapping, in } if securityGroup == nil { - glog.Warningf("Ignoring instance without security group: %s", aws.StringValue(instance.InstanceId)) + klog.Warningf("Ignoring instance without security group: %s", aws.StringValue(instance.InstanceId)) continue } id := aws.StringValue(securityGroup.GroupId) if id == "" { - glog.Warningf("found security group without id: %v", securityGroup) + klog.Warningf("found security group without id: %v", securityGroup) continue } @@ -884,7 +905,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLB(mappings []nlbPortMapping, in } // Run once for health check traffic - err = c.updateInstanceSecurityGroupsForNLBTraffic(actualGroups, desiredGroupIds, healthCheckPorts, lbName, []string{aws.StringValue(vpcCidr)}, false) + err = c.updateInstanceSecurityGroupsForNLBTraffic(actualGroups, desiredGroupIds, healthCheckPorts, lbName, vpcCidrBlocks, false) if err != nil { return err } @@ -937,7 +958,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala }) } - glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) + klog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) _, err := c.elb.CreateLoadBalancer(createRequest) if err != nil { return nil, err @@ -950,7 +971,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala } for _, listener := range listeners { - glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort) + klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. 
Setting to true", *listener.InstancePort) err := c.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)}) if err != nil { return nil, err @@ -974,7 +995,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.DetachLoadBalancerFromSubnetsInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.Subnets = stringSetToPointers(removals) - glog.V(2).Info("Detaching load balancer from removed subnets") + klog.V(2).Info("Detaching load balancer from removed subnets") _, err := c.elb.DetachLoadBalancerFromSubnets(request) if err != nil { return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %q", err) @@ -986,7 +1007,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.AttachLoadBalancerToSubnetsInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.Subnets = stringSetToPointers(additions) - glog.V(2).Info("Attaching load balancer to added subnets") + klog.V(2).Info("Attaching load balancer to added subnets") _, err := c.elb.AttachLoadBalancerToSubnets(request) if err != nil { return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %q", err) @@ -1009,7 +1030,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala } else { request.SecurityGroups = aws.StringSlice(securityGroupIDs) } - glog.V(2).Info("Applying updated security groups to load balancer") + klog.V(2).Info("Applying updated security groups to load balancer") _, err := c.elb.ApplySecurityGroupsToLoadBalancer(request) if err != nil { return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %q", err) @@ -1027,7 +1048,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala for _, listenerDescription := range listenerDescriptions { actual := listenerDescription.Listener if actual == nil { - glog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName) + klog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName) continue } @@ -1069,7 +1090,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.DeleteLoadBalancerListenersInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.LoadBalancerPorts = removals - glog.V(2).Info("Deleting removed load balancer listeners") + klog.V(2).Info("Deleting removed load balancer listeners") _, err := c.elb.DeleteLoadBalancerListeners(request) if err != nil { return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %q", err) @@ -1081,7 +1102,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.CreateLoadBalancerListenersInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.Listeners = additions - glog.V(2).Info("Creating added load balancer listeners") + klog.V(2).Info("Creating added load balancer listeners") _, err := c.elb.CreateLoadBalancerListeners(request) if err != nil { return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %q", err) @@ -1133,7 +1154,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala } if setPolicy { - glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol) + klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. 
Setting to %t", instancePort, proxyProtocol) err := c.setBackendPolicies(loadBalancerName, instancePort, proxyPolicies) if err != nil { return nil, err @@ -1147,7 +1168,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala // corresponding listener anymore for instancePort, found := range foundBackends { if !found { - glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort) + klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort) err := c.setBackendPolicies(loadBalancerName, instancePort, []*string{}) if err != nil { return nil, err @@ -1159,7 +1180,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala { // Add additional tags - glog.V(2).Infof("Creating additional load balancer tags for %s", loadBalancerName) + klog.V(2).Infof("Creating additional load balancer tags for %s", loadBalancerName) tags := getLoadBalancerAdditionalTags(annotations) if len(tags) > 0 { err := c.addLoadBalancerTags(loadBalancerName, tags) @@ -1178,7 +1199,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala describeAttributesRequest.LoadBalancerName = aws.String(loadBalancerName) describeAttributesOutput, err := c.elb.DescribeLoadBalancerAttributes(describeAttributesRequest) if err != nil { - glog.Warning("Unable to retrieve load balancer attributes during attribute sync") + klog.Warning("Unable to retrieve load balancer attributes during attribute sync") return nil, err } @@ -1186,7 +1207,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala // Update attributes if they're dirty if !reflect.DeepEqual(loadBalancerAttributes, foundAttributes) { - glog.V(2).Infof("Updating load-balancer attributes for %q", loadBalancerName) + klog.V(2).Infof("Updating load-balancer attributes for %q", loadBalancerName) modifyAttributesRequest := &elb.ModifyLoadBalancerAttributesInput{} modifyAttributesRequest.LoadBalancerName = aws.String(loadBalancerName) @@ -1202,7 +1223,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala if dirty { loadBalancer, err = c.describeLoadBalancer(loadBalancerName) if err != nil { - glog.Warning("Unable to retrieve load balancer after creation/update") + klog.Warning("Unable to retrieve load balancer after creation/update") return nil, err } } @@ -1326,16 +1347,16 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances removals := actual.Difference(expected) addInstances := []*elb.Instance{} - for _, instanceId := range additions.List() { + for _, instanceID := range additions.List() { addInstance := &elb.Instance{} - addInstance.InstanceId = aws.String(instanceId) + addInstance.InstanceId = aws.String(instanceID) addInstances = append(addInstances, addInstance) } removeInstances := []*elb.Instance{} - for _, instanceId := range removals.List() { + for _, instanceID := range removals.List() { removeInstance := &elb.Instance{} - removeInstance.InstanceId = aws.String(instanceId) + removeInstance.InstanceId = aws.String(instanceID) removeInstances = append(removeInstances, removeInstance) } @@ -1347,7 +1368,7 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances if err != nil { return err } - glog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName) + klog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName) } if len(removeInstances) > 0 { @@ -1358,7 
+1379,7 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances if err != nil { return err } - glog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName) + klog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName) } return nil @@ -1377,7 +1398,7 @@ func (c *Cloud) getLoadBalancerTLSPorts(loadBalancer *elb.LoadBalancerDescriptio } func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescription, policyName string) error { - glog.V(2).Info("Describing load balancer policies on load balancer") + klog.V(2).Info("Describing load balancer policies on load balancer") result, err := c.elb.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ LoadBalancerName: loadBalancer.LoadBalancerName, PolicyNames: []*string{ @@ -1398,7 +1419,7 @@ func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescrip return nil } - glog.V(2).Infof("Creating SSL negotiation policy '%s' on load balancer", fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName)) + klog.V(2).Infof("Creating SSL negotiation policy '%s' on load balancer", fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName)) // there is an upper limit of 98 policies on an ELB, we're pretty safe from // running into it _, err = c.elb.CreateLoadBalancerPolicy(&elb.CreateLoadBalancerPolicyInput{ @@ -1427,7 +1448,7 @@ func (c *Cloud) setSSLNegotiationPolicy(loadBalancerName, sslPolicyName string, aws.String(policyName), }, } - glog.V(2).Infof("Setting SSL negotiation policy '%s' on load balancer", policyName) + klog.V(2).Infof("Setting SSL negotiation policy '%s' on load balancer", policyName) _, err := c.elb.SetLoadBalancerPoliciesOfListener(request) if err != nil { return fmt.Errorf("error setting SSL negotiation policy '%s' on load balancer: %q", policyName, err) @@ -1447,7 +1468,7 @@ func (c *Cloud) createProxyProtocolPolicy(loadBalancerName string) error { }, }, } - glog.V(2).Info("Creating proxy protocol policy on load balancer") + klog.V(2).Info("Creating proxy protocol policy on load balancer") _, err := c.elb.CreateLoadBalancerPolicy(request) if err != nil { return fmt.Errorf("error creating proxy protocol policy on load balancer: %q", err) @@ -1463,9 +1484,9 @@ func (c *Cloud) setBackendPolicies(loadBalancerName string, instancePort int64, PolicyNames: policies, } if len(policies) > 0 { - glog.V(2).Infof("Adding AWS loadbalancer backend policies on node port %d", instancePort) + klog.V(2).Infof("Adding AWS loadbalancer backend policies on node port %d", instancePort) } else { - glog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort) + klog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort) } _, err := c.elb.SetLoadBalancerPoliciesForBackendServer(request) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go index 3ef0ddce575f1..2827596dce49b 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go @@ -22,8 +22,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/klog" + + cloudprovider "k8s.io/cloud-provider" ) func (c *Cloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) { @@ -116,7 +117,7 @@ 
func (c *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpro route.TargetNode = mapInstanceToNodeName(instance) routes = append(routes, route) } else { - glog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID) + klog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID) } } } @@ -171,7 +172,7 @@ func (c *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st } if deleteRoute != nil { - glog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock)) + klog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock)) request := &ec2.DeleteRouteInput{} request.DestinationCidrBlock = deleteRoute.DestinationCidrBlock diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go index b56699f854b70..bd373d64e5881 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go @@ -18,6 +18,7 @@ package aws import ( "github.com/aws/aws-sdk-go/aws" + "k8s.io/apimachinery/pkg/util/sets" ) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go index 490b673bb189a..ae5b0275773af 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go @@ -27,18 +27,20 @@ import ( // can be used for anything that DeviceAllocator user wants. // Only the relevant part of device name should be in the map, e.g. "ba" for // "/dev/xvdba". -type ExistingDevices map[mountDevice]awsVolumeID +type ExistingDevices map[mountDevice]EBSVolumeID -// On AWS, we should assign new (not yet used) device names to attached volumes. -// If we reuse a previously used name, we may get the volume "attaching" forever, -// see https://aws.amazon.com/premiumsupport/knowledge-center/ebs-stuck-attaching/. // DeviceAllocator finds available device name, taking into account already // assigned device names from ExistingDevices map. It tries to find the next // device name to the previously assigned one (from previous DeviceAllocator // call), so all available device names are used eventually and it minimizes // device name reuse. +// // All these allocations are in-memory, nothing is written to / read from // /dev directory. +// +// On AWS, we should assign new (not yet used) device names to attached volumes. +// If we reuse a previously used name, we may get the volume "attaching" forever, +// see https://aws.amazon.com/premiumsupport/knowledge-center/ebs-stuck-attaching/. type DeviceAllocator interface { // GetNext returns a free device name or error when there is no free device // name. Only the device suffix is returned, e.g. "ba" for "/dev/xvdba". @@ -74,9 +76,9 @@ func (p devicePairList) Len() int { return len(p) } func (p devicePairList) Less(i, j int) bool { return p[i].deviceIndex < p[j].deviceIndex } func (p devicePairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -// Allocates device names according to scheme ba..bz, ca..cz -// it moves along the ring and always picks next device until -// device list is exhausted. 
+// NewDeviceAllocator allocates device names according to scheme ba..bz, ca..cz +// it moves along the ring and always picks next device until device list is +// exhausted. func NewDeviceAllocator() DeviceAllocator { possibleDevices := make(map[mountDevice]int) for _, firstChar := range []rune{'b', 'c'} { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go index a42834415dd0f..60b10abc6a815 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go @@ -19,15 +19,16 @@ package aws import ( "fmt" "net/url" + "regexp" "strings" + "sync" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/api/core/v1" - "regexp" - "sync" - "time" ) // awsInstanceRegMatch represents Regex Match for AWS instance. @@ -109,12 +110,12 @@ func mapToAWSInstanceIDsTolerant(nodes []*v1.Node) []awsInstanceID { var instanceIDs []awsInstanceID for _, node := range nodes { if node.Spec.ProviderID == "" { - glog.Warningf("node %q did not have ProviderID set", node.Name) + klog.Warningf("node %q did not have ProviderID set", node.Name) continue } instanceID, err := kubernetesInstanceID(node.Spec.ProviderID).mapToAWSInstanceID() if err != nil { - glog.Warningf("unable to parse ProviderID %q for node %q", node.Spec.ProviderID, node.Name) + klog.Warningf("unable to parse ProviderID %q for node %q", node.Spec.ProviderID, node.Name) continue } instanceIDs = append(instanceIDs, instanceID) @@ -155,7 +156,7 @@ type instanceCache struct { func (c *instanceCache) describeAllInstancesUncached() (*allInstancesSnapshot, error) { now := time.Now() - glog.V(4).Infof("EC2 DescribeInstances - fetching all instances") + klog.V(4).Infof("EC2 DescribeInstances - fetching all instances") filters := []*ec2.Filter{} instances, err := c.cloud.describeInstances(filters) @@ -176,7 +177,7 @@ func (c *instanceCache) describeAllInstancesUncached() (*allInstancesSnapshot, e if c.snapshot != nil && snapshot.olderThan(c.snapshot) { // If this happens a lot, we could run this function in a mutex and only return one result - glog.Infof("Not caching concurrent AWS DescribeInstances results") + klog.Infof("Not caching concurrent AWS DescribeInstances results") } else { c.snapshot = snapshot } @@ -209,7 +210,7 @@ func (c *instanceCache) describeAllInstancesCached(criteria cacheCriteria) (*all return nil, err } } else { - glog.V(6).Infof("EC2 DescribeInstances - using cached results") + klog.V(6).Infof("EC2 DescribeInstances - using cached results") } return snapshot, nil @@ -235,7 +236,7 @@ func (s *allInstancesSnapshot) MeetsCriteria(criteria cacheCriteria) bool { // Sub() is technically broken by time changes until we have monotonic time now := time.Now() if now.Sub(s.timestamp) > criteria.MaxAge { - glog.V(6).Infof("instanceCache snapshot cannot be used as is older than MaxAge=%s", criteria.MaxAge) + klog.V(6).Infof("instanceCache snapshot cannot be used as is older than MaxAge=%s", criteria.MaxAge) return false } } @@ -243,7 +244,7 @@ func (s *allInstancesSnapshot) MeetsCriteria(criteria cacheCriteria) bool { if len(criteria.HasInstances) != 0 { for _, id := range criteria.HasInstances { if nil == s.instances[id] { - glog.V(6).Infof("instanceCache snapshot cannot be used as does not contain instance %s", id) + klog.V(6).Infof("instanceCache snapshot 
cannot be used as does not contain instance %s", id) return false } } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go index 86aa30628db10..9328fd284ac7a 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go @@ -18,23 +18,23 @@ package aws import ( "github.com/aws/aws-sdk-go/aws/request" - "github.com/golang/glog" + "k8s.io/klog" ) // Handler for aws-sdk-go that logs all requests func awsHandlerLogger(req *request.Request) { service, name := awsServiceAndName(req) - glog.V(4).Infof("AWS request: %s %s", service, name) + klog.V(4).Infof("AWS request: %s %s", service, name) } func awsSendHandlerLogger(req *request.Request) { service, name := awsServiceAndName(req) - glog.V(4).Infof("AWS API Send: %s %s %v %v", service, name, req.Operation, req.Params) + klog.V(4).Infof("AWS API Send: %s %s %v %v", service, name, req.Operation, req.Params) } func awsValidateResponseHandlerLogger(req *request.Request) { service, name := awsServiceAndName(req) - glog.V(4).Infof("AWS API ValidateResponse: %s %s %v %v %s", service, name, req.Operation, req.Params, req.HTTPResponse.Status) + klog.V(4).Infof("AWS API ValidateResponse: %s %s %v %v %s", service, name, req.Operation, req.Params, req.HTTPResponse.Status) } func awsServiceAndName(req *request.Request) (string, string) { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions.go index dc57447729b31..f19bab6eb550d 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions.go @@ -17,18 +17,21 @@ limitations under the License. package aws import ( - "github.com/golang/glog" + "sync" + + "k8s.io/klog" + "k8s.io/apimachinery/pkg/util/sets" awscredentialprovider "k8s.io/kubernetes/pkg/credentialprovider/aws" - "sync" ) -// WellKnownRegions is the complete list of regions known to the AWS cloudprovider +// wellKnownRegions is the complete list of regions known to the AWS cloudprovider // and credentialprovider. -var WellKnownRegions = [...]string{ +var wellKnownRegions = [...]string{ // from `aws ec2 describe-regions --region us-east-1 --query Regions[].RegionName | sort` "ap-northeast-1", "ap-northeast-2", + "ap-northeast-3", "ap-south-1", "ap-southeast-1", "ap-southeast-2", @@ -36,6 +39,7 @@ var WellKnownRegions = [...]string{ "eu-central-1", "eu-west-1", "eu-west-2", + "eu-west-3", "sa-east-1", "us-east-1", "us-east-2", @@ -44,6 +48,7 @@ var WellKnownRegions = [...]string{ // these are not registered in many / most accounts "cn-north-1", + "cn-northwest-1", "us-gov-west-1", } @@ -53,12 +58,12 @@ var awsRegionsMutex sync.Mutex // awsRegions is a set of recognized regions var awsRegions sets.String -// RecognizeRegion is called for each AWS region we know about. +// recognizeRegion is called for each AWS region we know about. // It currently registers a credential provider for that region. 
// There are two paths to discovering a region: // * we hard-code some well-known regions // * if a region is discovered from instance metadata, we add that -func RecognizeRegion(region string) { +func recognizeRegion(region string) { awsRegionsMutex.Lock() defer awsRegionsMutex.Unlock() @@ -67,21 +72,21 @@ func RecognizeRegion(region string) { } if awsRegions.Has(region) { - glog.V(6).Infof("found AWS region %q again - ignoring", region) + klog.V(6).Infof("found AWS region %q again - ignoring", region) return } - glog.V(4).Infof("found AWS region %q", region) + klog.V(4).Infof("found AWS region %q", region) awscredentialprovider.RegisterCredentialsProvider(region) awsRegions.Insert(region) } -// RecognizeWellKnownRegions calls RecognizeRegion on each WellKnownRegion -func RecognizeWellKnownRegions() { - for _, region := range WellKnownRegions { - RecognizeRegion(region) +// recognizeWellKnownRegions calls RecognizeRegion on each WellKnownRegion +func recognizeWellKnownRegions() { + for _, region := range wellKnownRegions { + recognizeRegion(region) } } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go index f403168e620c4..0fe6c2a575342 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go @@ -24,7 +24,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -40,19 +40,19 @@ type CrossRequestRetryDelay struct { backoff Backoff } -// Create a new CrossRequestRetryDelay +// NewCrossRequestRetryDelay creates a new CrossRequestRetryDelay func NewCrossRequestRetryDelay() *CrossRequestRetryDelay { c := &CrossRequestRetryDelay{} c.backoff.init(decayIntervalSeconds, decayFraction, maxDelay) return c } -// Added to the Sign chain; called before each request +// BeforeSign is added to the Sign chain; called before each request func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) { now := time.Now() delay := c.backoff.ComputeDelayForRequest(now) if delay > 0 { - glog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s", + klog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s", describeRequest(r), delay.String()) if sleepFn := r.Config.SleepDelay; sleepFn != nil { @@ -84,7 +84,7 @@ func describeRequest(r *request.Request) string { return service + "::" + operationName(r) } -// Added to the AfterRetry chain; called after any error +// AfterRetry is added to the AfterRetry chain; called after any error func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) { if r.Error == nil { return @@ -96,7 +96,7 @@ func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) { if awsError.Code() == "RequestLimitExceeded" { c.backoff.ReportError() recordAWSThrottlesMetric(operationName(r)) - glog.Warningf("Got RequestLimitExceeded error on AWS request (%s)", + klog.Warningf("Got RequestLimitExceeded error on AWS request (%s)", describeRequest(r)) } } @@ -126,7 +126,8 @@ func (b *Backoff) init(decayIntervalSeconds int, decayFraction float64, maxDelay b.maxDelay = maxDelay } -// Computes the delay required for a request, also updating internal state to count this request +// ComputeDelayForRequest computes the delay required for a request, also +// updates 
internal state to count this request func (b *Backoff) ComputeDelayForRequest(now time.Time) time.Duration { b.mutex.Lock() defer b.mutex.Unlock() @@ -165,7 +166,7 @@ func (b *Backoff) ComputeDelayForRequest(now time.Time) time.Duration { return time.Second * time.Duration(int(delay.Seconds())) } -// Called when we observe a throttling error +// ReportError is called when we observe a throttling error func (b *Backoff) ReportError() { b.mutex.Lock() defer b.mutex.Unlock() diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go index 8a268c4b0e277..d71948f8d22db 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go @@ -23,8 +23,10 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" ) +// IPPermissionSet maps IP strings of strings to EC2 IpPermissions type IPPermissionSet map[string]*ec2.IpPermission +// NewIPPermissionSet creates a new IPPermissionSet func NewIPPermissionSet(items ...*ec2.IpPermission) IPPermissionSet { s := make(IPPermissionSet) s.Insert(items...) @@ -97,10 +99,10 @@ func (s IPPermissionSet) List() []*ec2.IpPermission { return res } -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool { +// IsSuperset returns true if and only if s is a superset of s2. +func (s IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool { for k := range s2 { - _, found := s1[k] + _, found := s[k] if !found { return false } @@ -108,11 +110,11 @@ func (s1 IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool { return true } -// Equal returns true if and only if s1 is equal (as a set) to s2. +// Equal returns true if and only if s is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) -func (s1 IPPermissionSet) Equal(s2 IPPermissionSet) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) +func (s IPPermissionSet) Equal(s2 IPPermissionSet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) } // Difference returns a set of objects that are not in s2 diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go index 43130c3601f64..de6cad543e35a 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go @@ -18,12 +18,12 @@ package aws import ( "fmt" - "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/apimachinery/pkg/util/wait" ) @@ -38,6 +38,7 @@ const TagNameKubernetesClusterPrefix = "kubernetes.io/cluster/" // did not allow shared resources. 
const TagNameKubernetesClusterLegacy = "KubernetesCluster" +// ResourceLifecycle is the cluster lifecycle state used in tagging type ResourceLifecycle string const ( @@ -73,7 +74,7 @@ func (t *awsTagging) init(legacyClusterID string, clusterID string) error { t.ClusterID = clusterID if clusterID != "" { - glog.Infof("AWS cloud filtering on ClusterID: %v", clusterID) + klog.Infof("AWS cloud filtering on ClusterID: %v", clusterID) } else { return fmt.Errorf("AWS cloud failed to find ClusterID") } @@ -91,7 +92,7 @@ func (t *awsTagging) initFromTags(tags []*ec2.Tag) error { } if legacyClusterID == "" && newClusterID == "" { - glog.Errorf("Tag %q nor %q not found; Kubernetes may behave unexpectedly.", TagNameKubernetesClusterLegacy, TagNameKubernetesClusterPrefix+"...") + klog.Errorf("Tag %q nor %q not found; Kubernetes may behave unexpectedly.", TagNameKubernetesClusterLegacy, TagNameKubernetesClusterPrefix+"...") } return t.init(legacyClusterID, newClusterID) @@ -152,13 +153,13 @@ func (t *awsTagging) hasClusterTag(tags []*ec2.Tag) bool { // Ensure that a resource has the correct tags // If it has no tags, we assume that this was a problem caused by an error in between creation and tagging, // and we add the tags. If it has a different cluster's tags, that is an error. -func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecycle ResourceLifecycle, additionalTags map[string]string, observedTags []*ec2.Tag) error { +func (t *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecycle ResourceLifecycle, additionalTags map[string]string, observedTags []*ec2.Tag) error { actualTagMap := make(map[string]string) for _, tag := range observedTags { actualTagMap[aws.StringValue(tag.Key)] = aws.StringValue(tag.Value) } - expectedTags := c.buildTags(lifecycle, additionalTags) + expectedTags := t.buildTags(lifecycle, additionalTags) addTags := make(map[string]string) for k, expected := range expectedTags { @@ -167,7 +168,7 @@ func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecy continue } if actual == "" { - glog.Warningf("Resource %q was missing expected cluster tag %q. Will add (with value %q)", resourceID, k, expected) + klog.Warningf("Resource %q was missing expected cluster tag %q. Will add (with value %q)", resourceID, k, expected) addTags[k] = expected } else { return fmt.Errorf("resource %q has tag belonging to another cluster: %q=%q (expected %q)", resourceID, k, actual, expected) @@ -178,7 +179,7 @@ func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecy return nil } - if err := c.createTags(client, resourceID, lifecycle, addTags); err != nil { + if err := t.createTags(client, resourceID, lifecycle, addTags); err != nil { return fmt.Errorf("error adding missing tags to resource %q: %q", resourceID, err) } @@ -222,7 +223,7 @@ func (t *awsTagging) createTags(client EC2, resourceID string, lifecycle Resourc // We could check that the error is retryable, but the error code changes based on what we are tagging // SecurityGroup: InvalidGroup.NotFound - glog.V(2).Infof("Failed to create tags; will retry. Error was %q", err) + klog.V(2).Infof("Failed to create tags; will retry. 
Error was %q", err) lastErr = err return false, nil }) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go index cb3a2aa275ac8..7031c5a52c141 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go @@ -24,21 +24,22 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/apimachinery/pkg/types" ) // awsVolumeRegMatch represents Regex Match for AWS volume. var awsVolumeRegMatch = regexp.MustCompile("^vol-[^/]*$") -// awsVolumeID represents the ID of the volume in the AWS API, e.g. vol-12345678 -// The "traditional" format is "vol-12345678" -// A new longer format is also being introduced: "vol-12345678abcdef01" -// We should not assume anything about the length or format, though it seems -// reasonable to assume that volumes will continue to start with "vol-". -type awsVolumeID string +// EBSVolumeID represents the ID of the volume in the AWS API, e.g. +// vol-12345678 The "traditional" format is "vol-12345678" A new longer format +// is also being introduced: "vol-12345678abcdef01" We should not assume +// anything about the length or format, though it seems reasonable to assume +// that volumes will continue to start with "vol-". +type EBSVolumeID string -func (i awsVolumeID) awsString() *string { +func (i EBSVolumeID) awsString() *string { return aws.String(string(i)) } @@ -59,8 +60,8 @@ type diskInfo struct { disk *awsDisk } -// MapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID -func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) { +// MapToAWSVolumeID extracts the EBSVolumeID from the KubernetesVolumeID +func (name KubernetesVolumeID) MapToAWSVolumeID() (EBSVolumeID, error) { // name looks like aws://availability-zone/awsVolumeId // The original idea of the URL-style name was to put the AZ into the @@ -96,9 +97,10 @@ func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) { return "", fmt.Errorf("Invalid format for AWS volume (%s)", name) } - return awsVolumeID(awsID), nil + return EBSVolumeID(awsID), nil } +// GetAWSVolumeID converts a Kubernetes volume ID to an AWS volume ID func GetAWSVolumeID(kubeVolumeID string) (string, error) { kid := KubernetesVolumeID(kubeVolumeID) awsID, err := kid.MapToAWSVolumeID() @@ -119,7 +121,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type info, err := disk.describeVolume() if err != nil { - glog.Warningf("Error describing volume %s with %v", diskName, err) + klog.Warningf("Error describing volume %s with %v", diskName, err) awsDiskInfo.volumeState = "unknown" return awsDiskInfo, false, err } @@ -136,7 +138,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type // has been deleted if err != nil { fetchErr := fmt.Errorf("Error fetching instance %s for volume %s", instanceID, diskName) - glog.Warning(fetchErr) + klog.Warning(fetchErr) return awsDiskInfo, false, fetchErr } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD.bazel index 4f4c0d03f128a..b61368f8751a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD.bazel @@ -14,6 +14,7 @@ 
go_library( "gce_clusterid.go", "gce_clusters.go", "gce_disks.go", + "gce_fake.go", "gce_firewall.go", "gce_forwardingrule.go", "gce_healthchecks.go", @@ -42,7 +43,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/cloud.google.com/go/compute/metadata:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", @@ -61,6 +61,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/version:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", @@ -71,17 +72,18 @@ go_library( "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", + "//vendor/k8s.io/cloud-provider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/api/v1/service:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/cloudprovider:go_default_library", "//vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud:go_default_library", "//vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", "//vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock:go_default_library", "//vendor/k8s.io/kubernetes/pkg/controller:go_default_library", "//vendor/k8s.io/kubernetes/pkg/features:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubelet/apis:go_default_library", "//vendor/k8s.io/kubernetes/pkg/master/ports:go_default_library", "//vendor/k8s.io/kubernetes/pkg/util/net/sets:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/util/version:go_default_library", "//vendor/k8s.io/kubernetes/pkg/version:go_default_library", "//vendor/k8s.io/kubernetes/pkg/volume:go_default_library", "//vendor/k8s.io/kubernetes/pkg/volume/util:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS index 2941ce492b5e1..f02eb9550b19f 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS @@ -3,6 +3,5 @@ approvers: - jingxu97 - bowei - freehan -- nicksardo - mrhohn -- dnardo +- cheftako diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD.bazel index f0610121ca21c..738df331de1c4 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD.bazel @@ -18,11 +18,11 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", 
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library", "//vendor/google.golang.org/api/compute/v1:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", "//vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/BUILD.bazel index b65900f6816c3..fcd819d3aff3d 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/BUILD.bazel @@ -6,5 +6,5 @@ go_library( importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/filter.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/filter.go index c08005726c879..b65ab6391a7d0 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/filter.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/filter.go @@ -34,7 +34,7 @@ import ( "regexp" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -221,7 +221,7 @@ func (fp *filterPredicate) String() string { func (fp *filterPredicate) match(o interface{}) bool { v, err := extractValue(fp.fieldName, o) - glog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err) + klog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err) if err != nil { return false } @@ -234,7 +234,7 @@ func (fp *filterPredicate) match(o interface{}) bool { } re, err := regexp.Compile(*fp.s) if err != nil { - glog.Errorf("Match regexp %q is invalid: %v", *fp.s, err) + klog.Errorf("Match regexp %q is invalid: %v", *fp.s, err) return false } match = re.Match([]byte(x)) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go index a25cac9909c85..f51ec77f0df01 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -25,8 +25,8 @@ import ( "net/http" "sync" - "github.com/golang/glog" "google.golang.org/api/googleapi" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" @@ -48,7 +48,7 @@ type Cloud interface { RegionBackendServices() RegionBackendServices AlphaRegionBackendServices() AlphaRegionBackendServices Disks() Disks - BetaRegionDisks() BetaRegionDisks + RegionDisks() RegionDisks Firewalls() Firewalls ForwardingRules() ForwardingRules AlphaForwardingRules() AlphaForwardingRules @@ -89,7 +89,7 @@ func NewGCE(s *Service) *GCE { gceRegionBackendServices: &GCERegionBackendServices{s}, gceAlphaRegionBackendServices: &GCEAlphaRegionBackendServices{s}, gceDisks: &GCEDisks{s}, - gceBetaRegionDisks: &GCEBetaRegionDisks{s}, + gceRegionDisks: &GCERegionDisks{s}, gceFirewalls: &GCEFirewalls{s}, gceForwardingRules: &GCEForwardingRules{s}, gceAlphaForwardingRules: &GCEAlphaForwardingRules{s}, @@ -134,7 +134,7 @@ type GCE struct { gceRegionBackendServices *GCERegionBackendServices gceAlphaRegionBackendServices *GCEAlphaRegionBackendServices gceDisks *GCEDisks - gceBetaRegionDisks *GCEBetaRegionDisks + gceRegionDisks *GCERegionDisks gceFirewalls *GCEFirewalls gceForwardingRules *GCEForwardingRules gceAlphaForwardingRules *GCEAlphaForwardingRules @@ -212,9 +212,9 @@ func (gce *GCE) Disks() Disks { return gce.gceDisks } -// BetaRegionDisks returns the interface for the beta RegionDisks. -func (gce *GCE) BetaRegionDisks() BetaRegionDisks { - return gce.gceBetaRegionDisks +// RegionDisks returns the interface for the ga RegionDisks. +func (gce *GCE) RegionDisks() RegionDisks { + return gce.gceRegionDisks } // Firewalls returns the interface for the ga Firewalls. @@ -381,7 +381,7 @@ func NewMockGCE(projectRouter ProjectRouter) *MockGCE { MockRegionBackendServices: NewMockRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), MockAlphaRegionBackendServices: NewMockAlphaRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), MockDisks: NewMockDisks(projectRouter, mockDisksObjs), - MockBetaRegionDisks: NewMockBetaRegionDisks(projectRouter, mockRegionDisksObjs), + MockRegionDisks: NewMockRegionDisks(projectRouter, mockRegionDisksObjs), MockFirewalls: NewMockFirewalls(projectRouter, mockFirewallsObjs), MockForwardingRules: NewMockForwardingRules(projectRouter, mockForwardingRulesObjs), MockAlphaForwardingRules: NewMockAlphaForwardingRules(projectRouter, mockForwardingRulesObjs), @@ -426,7 +426,7 @@ type MockGCE struct { MockRegionBackendServices *MockRegionBackendServices MockAlphaRegionBackendServices *MockAlphaRegionBackendServices MockDisks *MockDisks - MockBetaRegionDisks *MockBetaRegionDisks + MockRegionDisks *MockRegionDisks MockFirewalls *MockFirewalls MockForwardingRules *MockForwardingRules MockAlphaForwardingRules *MockAlphaForwardingRules @@ -504,9 +504,9 @@ func (mock *MockGCE) Disks() Disks { return mock.MockDisks } -// BetaRegionDisks returns the interface for the beta RegionDisks. -func (mock *MockGCE) BetaRegionDisks() BetaRegionDisks { - return mock.MockBetaRegionDisks +// RegionDisks returns the interface for the ga RegionDisks. +func (mock *MockGCE) RegionDisks() RegionDisks { + return mock.MockRegionDisks } // Firewalls returns the interface for the ga Firewalls. @@ -649,7 +649,7 @@ func (m *MockAddressesObj) ToAlpha() *alpha.Address { // Convert the object via JSON copying to the type that was requested. 
ret := &alpha.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.Address via JSON: %v", m.Obj, err) } return ret } @@ -662,7 +662,7 @@ func (m *MockAddressesObj) ToBeta() *beta.Address { // Convert the object via JSON copying to the type that was requested. ret := &beta.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.Address via JSON: %v", m.Obj, err) } return ret } @@ -675,7 +675,7 @@ func (m *MockAddressesObj) ToGA() *ga.Address { // Convert the object via JSON copying to the type that was requested. ret := &ga.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) } return ret } @@ -695,7 +695,7 @@ func (m *MockBackendServicesObj) ToAlpha() *alpha.BackendService { // Convert the object via JSON copying to the type that was requested. ret := &alpha.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -708,7 +708,7 @@ func (m *MockBackendServicesObj) ToBeta() *beta.BackendService { // Convert the object via JSON copying to the type that was requested. ret := &beta.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -721,7 +721,7 @@ func (m *MockBackendServicesObj) ToGA() *ga.BackendService { // Convert the object via JSON copying to the type that was requested. ret := &ga.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -741,7 +741,7 @@ func (m *MockDisksObj) ToGA() *ga.Disk { // Convert the object via JSON copying to the type that was requested. ret := &ga.Disk{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) } return ret } @@ -761,7 +761,7 @@ func (m *MockFirewallsObj) ToGA() *ga.Firewall { // Convert the object via JSON copying to the type that was requested. ret := &ga.Firewall{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Firewall via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Firewall via JSON: %v", m.Obj, err) } return ret } @@ -781,7 +781,7 @@ func (m *MockForwardingRulesObj) ToAlpha() *alpha.ForwardingRule { // Convert the object via JSON copying to the type that was requested. 
ret := &alpha.ForwardingRule{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.ForwardingRule via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.ForwardingRule via JSON: %v", m.Obj, err) } return ret } @@ -794,7 +794,7 @@ func (m *MockForwardingRulesObj) ToGA() *ga.ForwardingRule { // Convert the object via JSON copying to the type that was requested. ret := &ga.ForwardingRule{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) } return ret } @@ -814,7 +814,7 @@ func (m *MockGlobalAddressesObj) ToGA() *ga.Address { // Convert the object via JSON copying to the type that was requested. ret := &ga.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) } return ret } @@ -834,7 +834,7 @@ func (m *MockGlobalForwardingRulesObj) ToGA() *ga.ForwardingRule { // Convert the object via JSON copying to the type that was requested. ret := &ga.ForwardingRule{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) } return ret } @@ -854,7 +854,7 @@ func (m *MockHealthChecksObj) ToAlpha() *alpha.HealthCheck { // Convert the object via JSON copying to the type that was requested. ret := &alpha.HealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.HealthCheck via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.HealthCheck via JSON: %v", m.Obj, err) } return ret } @@ -867,7 +867,7 @@ func (m *MockHealthChecksObj) ToBeta() *beta.HealthCheck { // Convert the object via JSON copying to the type that was requested. ret := &beta.HealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.HealthCheck via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.HealthCheck via JSON: %v", m.Obj, err) } return ret } @@ -880,7 +880,7 @@ func (m *MockHealthChecksObj) ToGA() *ga.HealthCheck { // Convert the object via JSON copying to the type that was requested. ret := &ga.HealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.HealthCheck via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.HealthCheck via JSON: %v", m.Obj, err) } return ret } @@ -900,7 +900,7 @@ func (m *MockHttpHealthChecksObj) ToGA() *ga.HttpHealthCheck { // Convert the object via JSON copying to the type that was requested. ret := &ga.HttpHealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.HttpHealthCheck via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.HttpHealthCheck via JSON: %v", m.Obj, err) } return ret } @@ -920,7 +920,7 @@ func (m *MockHttpsHealthChecksObj) ToGA() *ga.HttpsHealthCheck { // Convert the object via JSON copying to the type that was requested. 
ret := &ga.HttpsHealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.HttpsHealthCheck via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.HttpsHealthCheck via JSON: %v", m.Obj, err) } return ret } @@ -940,7 +940,7 @@ func (m *MockInstanceGroupsObj) ToGA() *ga.InstanceGroup { // Convert the object via JSON copying to the type that was requested. ret := &ga.InstanceGroup{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.InstanceGroup via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.InstanceGroup via JSON: %v", m.Obj, err) } return ret } @@ -960,7 +960,7 @@ func (m *MockInstancesObj) ToAlpha() *alpha.Instance { // Convert the object via JSON copying to the type that was requested. ret := &alpha.Instance{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.Instance via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.Instance via JSON: %v", m.Obj, err) } return ret } @@ -973,7 +973,7 @@ func (m *MockInstancesObj) ToBeta() *beta.Instance { // Convert the object via JSON copying to the type that was requested. ret := &beta.Instance{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.Instance via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.Instance via JSON: %v", m.Obj, err) } return ret } @@ -986,7 +986,7 @@ func (m *MockInstancesObj) ToGA() *ga.Instance { // Convert the object via JSON copying to the type that was requested. ret := &ga.Instance{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Instance via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Instance via JSON: %v", m.Obj, err) } return ret } @@ -1006,7 +1006,7 @@ func (m *MockNetworkEndpointGroupsObj) ToAlpha() *alpha.NetworkEndpointGroup { // Convert the object via JSON copying to the type that was requested. ret := &alpha.NetworkEndpointGroup{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.NetworkEndpointGroup via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.NetworkEndpointGroup via JSON: %v", m.Obj, err) } return ret } @@ -1019,7 +1019,7 @@ func (m *MockNetworkEndpointGroupsObj) ToBeta() *beta.NetworkEndpointGroup { // Convert the object via JSON copying to the type that was requested. ret := &beta.NetworkEndpointGroup{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.NetworkEndpointGroup via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.NetworkEndpointGroup via JSON: %v", m.Obj, err) } return ret } @@ -1039,7 +1039,7 @@ func (m *MockProjectsObj) ToGA() *ga.Project { // Convert the object via JSON copying to the type that was requested. ret := &ga.Project{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Project via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Project via JSON: %v", m.Obj, err) } return ret } @@ -1059,7 +1059,7 @@ func (m *MockRegionBackendServicesObj) ToAlpha() *alpha.BackendService { // Convert the object via JSON copying to the type that was requested. 
ret := &alpha.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -1072,7 +1072,7 @@ func (m *MockRegionBackendServicesObj) ToGA() *ga.BackendService { // Convert the object via JSON copying to the type that was requested. ret := &ga.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -1084,15 +1084,15 @@ type MockRegionDisksObj struct { Obj interface{} } -// ToBeta retrieves the given version of the object. -func (m *MockRegionDisksObj) ToBeta() *beta.Disk { - if ret, ok := m.Obj.(*beta.Disk); ok { +// ToGA retrieves the given version of the object. +func (m *MockRegionDisksObj) ToGA() *ga.Disk { + if ret, ok := m.Obj.(*ga.Disk); ok { return ret } // Convert the object via JSON copying to the type that was requested. - ret := &beta.Disk{} + ret := &ga.Disk{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.Disk via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) } return ret } @@ -1112,7 +1112,7 @@ func (m *MockRegionsObj) ToGA() *ga.Region { // Convert the object via JSON copying to the type that was requested. ret := &ga.Region{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Region via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Region via JSON: %v", m.Obj, err) } return ret } @@ -1132,7 +1132,7 @@ func (m *MockRoutesObj) ToGA() *ga.Route { // Convert the object via JSON copying to the type that was requested. ret := &ga.Route{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Route via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Route via JSON: %v", m.Obj, err) } return ret } @@ -1152,7 +1152,7 @@ func (m *MockSecurityPoliciesObj) ToBeta() *beta.SecurityPolicy { // Convert the object via JSON copying to the type that was requested. ret := &beta.SecurityPolicy{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.SecurityPolicy via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.SecurityPolicy via JSON: %v", m.Obj, err) } return ret } @@ -1172,7 +1172,7 @@ func (m *MockSslCertificatesObj) ToGA() *ga.SslCertificate { // Convert the object via JSON copying to the type that was requested. ret := &ga.SslCertificate{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.SslCertificate via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.SslCertificate via JSON: %v", m.Obj, err) } return ret } @@ -1192,7 +1192,7 @@ func (m *MockTargetHttpProxiesObj) ToGA() *ga.TargetHttpProxy { // Convert the object via JSON copying to the type that was requested. 
ret := &ga.TargetHttpProxy{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.TargetHttpProxy via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.TargetHttpProxy via JSON: %v", m.Obj, err) } return ret } @@ -1212,7 +1212,7 @@ func (m *MockTargetHttpsProxiesObj) ToGA() *ga.TargetHttpsProxy { // Convert the object via JSON copying to the type that was requested. ret := &ga.TargetHttpsProxy{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.TargetHttpsProxy via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.TargetHttpsProxy via JSON: %v", m.Obj, err) } return ret } @@ -1232,7 +1232,7 @@ func (m *MockTargetPoolsObj) ToGA() *ga.TargetPool { // Convert the object via JSON copying to the type that was requested. ret := &ga.TargetPool{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.TargetPool via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.TargetPool via JSON: %v", m.Obj, err) } return ret } @@ -1252,7 +1252,7 @@ func (m *MockUrlMapsObj) ToGA() *ga.UrlMap { // Convert the object via JSON copying to the type that was requested. ret := &ga.UrlMap{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.UrlMap via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.UrlMap via JSON: %v", m.Obj, err) } return ret } @@ -1272,7 +1272,7 @@ func (m *MockZonesObj) ToGA() *ga.Zone { // Convert the object via JSON copying to the type that was requested. ret := &ga.Zone{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Zone via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Zone via JSON: %v", m.Obj, err) } return ret } @@ -1332,7 +1332,7 @@ type MockAddresses struct { func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -1344,12 +1344,12 @@ func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, er defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -1357,7 +1357,7 @@ func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, er Code: http.StatusNotFound, Message: fmt.Sprintf("MockAddresses %v not found", key), } - glog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -1365,7 +1365,7 @@ func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, er func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + 
klog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -1375,7 +1375,7 @@ func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ( if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -1391,7 +1391,7 @@ func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ( objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -1399,7 +1399,7 @@ func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ( func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -1411,7 +1411,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -1419,7 +1419,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre Code: http.StatusConflict, Message: fmt.Sprintf("MockAddresses %v exists", key), } - glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -1428,7 +1428,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre obj.SelfLink = SelfLink(meta.VersionGA, projectID, "addresses", key) m.Objects[*key] = &MockAddressesObj{obj} - glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -1436,7 +1436,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre func (m *MockAddresses) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -1448,7 +1448,7 @@ func (m *MockAddresses) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -1456,12 +1456,12 @@ func (m *MockAddresses) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockAddresses %v not found", key), } - glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAddresses.Delete(%v, 
%v) = nil", ctx, key) + klog.V(5).Infof("MockAddresses.Delete(%v, %v) = nil", ctx, key) return nil } @@ -1477,10 +1477,10 @@ type GCEAddresses struct { // Get the Address named by key. func (g *GCEAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { - glog.V(5).Infof("GCEAddresses.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAddresses.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") @@ -1490,21 +1490,21 @@ func (g *GCEAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, err Version: meta.Version("ga"), Service: "Addresses", } - glog.V(5).Infof("GCEAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Addresses.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Address objects. func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) { - glog.V(5).Infof("GCEAddresses.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEAddresses.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") rk := &RateLimitKey{ ProjectID: projectID, @@ -1515,30 +1515,30 @@ func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([ if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.Addresses.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Address f := func(l *ga.AddressList) error { - glog.V(5).Infof("GCEAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -1546,9 +1546,9 @@ func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([ // Insert Address with key of value obj. func (g *GCEAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { - glog.V(5).Infof("GCEAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") @@ -1558,9 +1558,9 @@ func (g *GCEAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addres Version: meta.Version("ga"), Service: "Addresses", } - glog.V(5).Infof("GCEAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -1569,20 +1569,20 @@ func (g *GCEAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addres op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Address referenced by key. 
func (g *GCEAddresses) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAddresses.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") @@ -1592,9 +1592,9 @@ func (g *GCEAddresses) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Addresses", } - glog.V(5).Infof("GCEAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Addresses.Delete(projectID, key.Region, key.Name) @@ -1602,12 +1602,12 @@ func (g *GCEAddresses) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -1666,7 +1666,7 @@ type MockAlphaAddresses struct { func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Address, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -1678,12 +1678,12 @@ func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Add defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -1691,7 +1691,7 @@ func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Add Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaAddresses %v not found", key), } - glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -1699,7 +1699,7 @@ func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Add func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err 
} } @@ -1709,7 +1709,7 @@ func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -1725,7 +1725,7 @@ func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -1733,7 +1733,7 @@ func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alpha.Address) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -1745,7 +1745,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -1753,7 +1753,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaAddresses %v exists", key), } - glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -1762,7 +1762,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "addresses", key) m.Objects[*key] = &MockAddressesObj{obj} - glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -1770,7 +1770,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp func (m *MockAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -1782,7 +1782,7 @@ func (m *MockAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -1790,12 +1790,12 @@ func (m *MockAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaAddresses %v not found", key), } - glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - 
glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = nil", ctx, key) return nil } @@ -1811,10 +1811,10 @@ type GCEAlphaAddresses struct { // Get the Address named by key. func (g *GCEAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Address, error) { - glog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") @@ -1824,21 +1824,21 @@ func (g *GCEAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Addr Version: meta.Version("alpha"), Service: "Addresses", } - glog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.Addresses.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Address objects. func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) { - glog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") rk := &RateLimitKey{ ProjectID: projectID, @@ -1849,30 +1849,30 @@ func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter. if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.Alpha.Addresses.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.Address f := func(l *alpha.AddressList) error { - glog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -1880,9 +1880,9 @@ func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter. // Insert Address with key of value obj. func (g *GCEAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alpha.Address) error { - glog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") @@ -1892,9 +1892,9 @@ func (g *GCEAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alph Version: meta.Version("alpha"), Service: "Addresses", } - glog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -1903,20 +1903,20 @@ func (g *GCEAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alph op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Address referenced by key. 
func (g *GCEAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") @@ -1926,9 +1926,9 @@ func (g *GCEAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("alpha"), Service: "Addresses", } - glog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.Addresses.Delete(projectID, key.Region, key.Name) @@ -1936,12 +1936,12 @@ func (g *GCEAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -2000,7 +2000,7 @@ type MockBetaAddresses struct { func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Address, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -2012,12 +2012,12 @@ func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addre defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -2025,7 +2025,7 @@ func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addre Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaAddresses %v not found", key), } - glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -2033,7 +2033,7 @@ func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addre func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = 
[%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -2043,7 +2043,7 @@ func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter. if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -2059,7 +2059,7 @@ func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter. objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -2067,7 +2067,7 @@ func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter. func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta.Address) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -2079,7 +2079,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -2087,7 +2087,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaAddresses %v exists", key), } - glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -2096,7 +2096,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "addresses", key) m.Objects[*key] = &MockAddressesObj{obj} - glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -2104,7 +2104,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta func (m *MockBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -2116,7 +2116,7 @@ func (m *MockBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -2124,12 +2124,12 @@ func (m *MockBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaAddresses %v not found", key), } - glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } 
delete(m.Objects, *key) - glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = nil", ctx, key) return nil } @@ -2145,10 +2145,10 @@ type GCEBetaAddresses struct { // Get the Address named by key. func (g *GCEBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Address, error) { - glog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") @@ -2158,21 +2158,21 @@ func (g *GCEBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addres Version: meta.Version("beta"), Service: "Addresses", } - glog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.Addresses.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Address objects. func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) { - glog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") rk := &RateLimitKey{ ProjectID: projectID, @@ -2183,30 +2183,30 @@ func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.Beta.Addresses.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.Address f := func(l *beta.AddressList) error { - glog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -2214,9 +2214,9 @@ func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F // Insert Address with key of value obj. func (g *GCEBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta.Address) error { - glog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") @@ -2226,9 +2226,9 @@ func (g *GCEBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta. Version: meta.Version("beta"), Service: "Addresses", } - glog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -2237,20 +2237,20 @@ func (g *GCEBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Address referenced by key. 
func (g *GCEBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") @@ -2260,9 +2260,9 @@ func (g *GCEBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("beta"), Service: "Addresses", } - glog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.Addresses.Delete(projectID, key.Region, key.Name) @@ -2270,12 +2270,12 @@ func (g *GCEBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -2334,7 +2334,7 @@ type MockGlobalAddresses struct { func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -2346,12 +2346,12 @@ func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addre defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -2359,7 +2359,7 @@ func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addre Code: http.StatusNotFound, Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), } - glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -2367,7 +2367,7 @@ func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addre func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, 
len(objs), err) return objs, err } } @@ -2377,7 +2377,7 @@ func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Add if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -2390,7 +2390,7 @@ func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Add objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -2398,7 +2398,7 @@ func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Add func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -2410,7 +2410,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -2418,7 +2418,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockGlobalAddresses %v exists", key), } - glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -2427,7 +2427,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "addresses", key) m.Objects[*key] = &MockGlobalAddressesObj{obj} - glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -2435,7 +2435,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -2447,7 +2447,7 @@ func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -2455,12 +2455,12 @@ func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), } - glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - 
glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = nil", ctx, key) return nil } @@ -2476,10 +2476,10 @@ type GCEGlobalAddresses struct { // Get the Address named by key. func (g *GCEGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { - glog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") @@ -2489,21 +2489,21 @@ func (g *GCEGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addres Version: meta.Version("ga"), Service: "GlobalAddresses", } - glog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.GlobalAddresses.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Address objects. func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { - glog.V(5).Infof("GCEGlobalAddresses.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEGlobalAddresses.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") rk := &RateLimitKey{ ProjectID: projectID, @@ -2514,30 +2514,30 @@ func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Addr if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEGlobalAddresses.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.GlobalAddresses.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Address f := func(l *ga.AddressList) error { - glog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -2545,9 +2545,9 @@ func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Addr // Insert Address with key of value obj. func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { - glog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") @@ -2557,9 +2557,9 @@ func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "GlobalAddresses", } - glog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -2568,20 +2568,20 @@ func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Address referenced by key. 
func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") @@ -2591,9 +2591,9 @@ func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "GlobalAddresses", } - glog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.GlobalAddresses.Delete(projectID, key.Name) @@ -2602,12 +2602,12 @@ func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -2672,7 +2672,7 @@ type MockBackendServices struct { func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -2684,12 +2684,12 @@ func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backe defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -2697,7 +2697,7 @@ func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backe Code: http.StatusNotFound, Message: fmt.Sprintf("MockBackendServices %v not found", key), } - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -2705,7 +2705,7 @@ func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backe func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + 
klog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -2715,7 +2715,7 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -2728,7 +2728,7 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -2736,7 +2736,7 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -2748,7 +2748,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -2756,7 +2756,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockBackendServices %v exists", key), } - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -2765,7 +2765,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -2773,7 +2773,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -2785,7 +2785,7 @@ func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -2793,12 +2793,12 @@ func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockBackendServices %v not found", key), } - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + 
klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -2838,10 +2838,10 @@ type GCEBackendServices struct { // Get the BackendService named by key. func (g *GCEBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { - glog.V(5).Infof("GCEBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2851,21 +2851,21 @@ func (g *GCEBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backen Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { - glog.V(5).Infof("GCEBackendServices.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBackendServices.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -2876,30 +2876,30 @@ func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Back if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.BackendService f := func(l *ga.BackendServiceList) error { - glog.V(5).Infof("GCEBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -2907,9 +2907,9 @@ func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Back // Insert BackendService with key of value obj. func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { - glog.V(5).Infof("GCEBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2919,9 +2919,9 @@ func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -2930,20 +2930,20 @@ func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2953,9 +2953,9 @@ func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.BackendServices.Delete(projectID, key.Name) @@ -2964,21 +2964,21 @@ func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // GetHealth is a method on GCEBackendServices. func (g *GCEBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { - glog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2988,25 +2988,25 @@ func (g *GCEBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.BackendServices.GetHealth(projectID, key.Name, arg0) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Patch is a method on GCEBackendServices. 
func (g *GCEBackendServices) Patch(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - glog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -3016,30 +3016,30 @@ func (g *GCEBackendServices) Patch(ctx context.Context, key *meta.Key, arg0 *ga. Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.BackendServices.Patch(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } // Update is a method on GCEBackendServices. func (g *GCEBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - glog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -3049,21 +3049,21 @@ func (g *GCEBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -3126,7 +3126,7 @@ type MockBetaBackendServices struct { func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3138,12 +3138,12 @@ func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -3151,7 +3151,7 @@ func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaBackendServices %v not found", key), } - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -3159,7 +3159,7 @@ func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -3169,7 +3169,7 @@ func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*be if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -3182,7 +3182,7 @@ func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*be objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -3190,7 +3190,7 @@ func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*be func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -3202,7 +3202,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := 
m.Objects[*key]; ok { @@ -3210,7 +3210,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaBackendServices %v exists", key), } - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -3219,7 +3219,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -3227,7 +3227,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -3239,7 +3239,7 @@ func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) err defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -3247,12 +3247,12 @@ func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) err Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaBackendServices %v not found", key), } - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -3284,10 +3284,10 @@ type GCEBetaBackendServices struct { // Get the BackendService named by key. func (g *GCEBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { - glog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3297,21 +3297,21 @@ func (g *GCEBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta. 
Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) { - glog.V(5).Infof("GCEBetaBackendServices.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -3322,30 +3322,30 @@ func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*bet if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Beta.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.BackendService f := func(l *beta.BackendServiceList) error { - glog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -3353,9 +3353,9 @@ func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*bet // Insert BackendService with key of value obj. 
func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { - glog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3365,9 +3365,9 @@ func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -3376,20 +3376,20 @@ func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3399,9 +3399,9 @@ func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) erro Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.BackendServices.Delete(projectID, key.Name) @@ -3410,21 +3410,21 @@ func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) erro op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // SetSecurityPolicy is a method on GCEBetaBackendServices. func (g *GCEBetaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyReference) error { - glog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3434,30 +3434,30 @@ func (g *GCEBetaBackendServices) SetSecurityPolicy(ctx context.Context, key *met Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } // Update is a method on GCEBetaBackendServices. func (g *GCEBetaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *beta.BackendService) error { - glog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3467,21 +3467,21 @@ func (g *GCEBetaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -3544,7 +3544,7 @@ type MockAlphaBackendServices struct { func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3556,12 +3556,12 @@ func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alp defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -3569,7 +3569,7 @@ func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alp Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -3577,7 +3577,7 @@ func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alp func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -3587,7 +3587,7 @@ func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*a if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -3600,7 +3600,7 @@ func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*a objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -3608,7 +3608,7 @@ func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*a func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -3620,7 +3620,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return 
err } if _, ok := m.Objects[*key]; ok { @@ -3628,7 +3628,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaBackendServices %v exists", key), } - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -3637,7 +3637,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -3645,7 +3645,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -3657,7 +3657,7 @@ func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -3665,12 +3665,12 @@ func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) er Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -3702,10 +3702,10 @@ type GCEAlphaBackendServices struct { // Get the BackendService named by key. 
func (g *GCEAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3715,21 +3715,21 @@ func (g *GCEAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alph Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -3740,30 +3740,30 @@ func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*al if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Alpha.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.BackendService f := func(l *alpha.BackendServiceList) error { - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -3771,9 +3771,9 @@ func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*al // Insert BackendService with key of value obj. func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3783,9 +3783,9 @@ func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -3794,20 +3794,20 @@ func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3817,9 +3817,9 @@ func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) err Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.BackendServices.Delete(projectID, key.Name) @@ -3828,21 +3828,21 @@ func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) err op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // SetSecurityPolicy is a method on GCEAlphaBackendServices. func (g *GCEAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *alpha.SecurityPolicyReference) error { - glog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3852,30 +3852,30 @@ func (g *GCEAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *me Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } // Update is a method on GCEAlphaBackendServices. func (g *GCEAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3885,21 +3885,21 @@ func (g *GCEAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -3962,7 +3962,7 @@ type MockRegionBackendServices struct { func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3974,12 +3974,12 @@ func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -3987,7 +3987,7 @@ func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -3995,7 +3995,7 @@ func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -4005,7 +4005,7 @@ func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -4021,7 +4021,7 @@ func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -4029,7 +4029,7 @@ func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4041,7 +4041,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + 
klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -4049,7 +4049,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o Code: http.StatusConflict, Message: fmt.Sprintf("MockRegionBackendServices %v exists", key), } - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -4058,7 +4058,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o obj.SelfLink = SelfLink(meta.VersionGA, projectID, "backendServices", key) m.Objects[*key] = &MockRegionBackendServicesObj{obj} - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -4066,7 +4066,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4078,7 +4078,7 @@ func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) e defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -4086,12 +4086,12 @@ func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) e Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -4123,10 +4123,10 @@ type GCERegionBackendServices struct { // Get the BackendService named by key. func (g *GCERegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { - glog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4136,21 +4136,21 @@ func (g *GCERegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga. 
Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.RegionBackendServices.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) { - glog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -4161,30 +4161,30 @@ func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl * if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.RegionBackendServices.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.BackendService f := func(l *ga.BackendServiceList) error { - glog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -4192,9 +4192,9 @@ func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl * // Insert BackendService with key of value obj. 
func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { - glog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4204,9 +4204,9 @@ func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, ob Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -4215,20 +4215,20 @@ func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, ob op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4238,9 +4238,9 @@ func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) er Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.RegionBackendServices.Delete(projectID, key.Region, key.Name) @@ -4248,21 +4248,21 @@ func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) er op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // GetHealth is a method on GCERegionBackendServices. func (g *GCERegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { - glog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4272,25 +4272,25 @@ func (g *GCERegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...) 
= %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Update is a method on GCERegionBackendServices. func (g *GCERegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - glog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4300,21 +4300,21 @@ func (g *GCERegionBackendServices) Update(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -4377,7 +4377,7 @@ type MockAlphaRegionBackendServices struct { func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -4389,12 +4389,12 @@ func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -4402,7 +4402,7 @@ func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -4410,7 +4410,7 @@ func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -4420,7 +4420,7 @@ func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -4436,7 +4436,7 @@ func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -4444,7 +4444,7 @@ func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4456,7 +4456,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok 
:= m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -4464,7 +4464,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaRegionBackendServices %v exists", key), } - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -4473,7 +4473,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "backendServices", key) m.Objects[*key] = &MockRegionBackendServicesObj{obj} - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -4481,7 +4481,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4493,7 +4493,7 @@ func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -4501,12 +4501,12 @@ func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.K Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -4538,10 +4538,10 @@ type GCEAlphaRegionBackendServices struct { // Get the BackendService named by key. 
func (g *GCEAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4551,21 +4551,21 @@ func (g *GCEAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.RegionBackendServices.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -4576,30 +4576,30 @@ func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.Alpha.RegionBackendServices.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.BackendService f := func(l *alpha.BackendServiceList) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -4607,9 +4607,9 @@ func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, // Insert BackendService with key of value obj. func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4619,9 +4619,9 @@ func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -4630,20 +4630,20 @@ func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Ke op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4653,9 +4653,9 @@ func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.RegionBackendServices.Delete(projectID, key.Region, key.Name) @@ -4663,21 +4663,21 @@ func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Ke op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // GetHealth is a method on GCEAlphaRegionBackendServices. 
func (g *GCEAlphaRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) { - glog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4687,25 +4687,25 @@ func (g *GCEAlphaRegionBackendServices) GetHealth(ctx context.Context, key *meta Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Update is a method on GCEAlphaRegionBackendServices. func (g *GCEAlphaRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4715,21 +4715,21 @@ func (g *GCEAlphaRegionBackendServices) Update(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -4790,7 +4790,7 @@ type MockDisks struct { func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -4802,12 +4802,12 @@ func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -4815,7 +4815,7 @@ func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { Code: http.StatusNotFound, Message: fmt.Sprintf("MockDisks %v not found", key), } - glog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -4823,7 +4823,7 @@ func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -4833,7 +4833,7 @@ func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga. if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -4849,7 +4849,7 @@ func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga. objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -4857,7 +4857,7 @@ func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga. 
func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4869,7 +4869,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -4877,7 +4877,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err Code: http.StatusConflict, Message: fmt.Sprintf("MockDisks %v exists", key), } - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -4886,7 +4886,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key) m.Objects[*key] = &MockDisksObj{obj} - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -4894,7 +4894,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4906,7 +4906,7 @@ func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -4914,12 +4914,12 @@ func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockDisks %v not found", key), } - glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockDisks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -4943,10 +4943,10 @@ type GCEDisks struct { // Get the Disk named by key. 
func (g *GCEDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { - glog.V(5).Infof("GCEDisks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEDisks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -4956,21 +4956,21 @@ func (g *GCEDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Disks.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Disk objects. func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { - glog.V(5).Infof("GCEDisks.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEDisks.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") rk := &RateLimitKey{ ProjectID: projectID, @@ -4981,30 +4981,30 @@ func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.D if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.GA.Disks.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Disk f := func(l *ga.DiskList) error { - glog.V(5).Infof("GCEDisks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEDisks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -5012,9 +5012,9 @@ func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.D // Insert Disk with key of value obj. 
func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { - glog.V(5).Infof("GCEDisks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEDisks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -5024,9 +5024,9 @@ func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) erro Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -5035,20 +5035,20 @@ func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) erro op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Disk referenced by key. func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEDisks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEDisks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -5058,9 +5058,9 @@ func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Disks.Delete(projectID, key.Zone, key.Name) @@ -5068,21 +5068,21 @@ func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) return err } // Resize is a method on GCEDisks. 
func (g *GCEDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResizeRequest) error { - glog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -5092,36 +5092,36 @@ func (g *GCEDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResi Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Disks.Resize(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } -// BetaRegionDisks is an interface that allows for mocking of RegionDisks. -type BetaRegionDisks interface { - Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) - List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) - Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error +// RegionDisks is an interface that allows for mocking of RegionDisks. +type RegionDisks interface { + Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error Delete(ctx context.Context, key *meta.Key) error - Resize(context.Context, *meta.Key, *beta.RegionDisksResizeRequest) error + Resize(context.Context, *meta.Key, *ga.RegionDisksResizeRequest) error } -// NewMockBetaRegionDisks returns a new mock for RegionDisks. -func NewMockBetaRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) *MockBetaRegionDisks { - mock := &MockBetaRegionDisks{ +// NewMockRegionDisks returns a new mock for RegionDisks. +func NewMockRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) *MockRegionDisks { + mock := &MockRegionDisks{ ProjectRouter: pr, Objects: objs, @@ -5132,8 +5132,8 @@ func NewMockBetaRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisks return mock } -// MockBetaRegionDisks is the mock for RegionDisks. -type MockBetaRegionDisks struct { +// MockRegionDisks is the mock for RegionDisks. +type MockRegionDisks struct { Lock sync.Mutex ProjectRouter ProjectRouter @@ -5152,11 +5152,11 @@ type MockBetaRegionDisks struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. 
- GetHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionDisks) (bool, *beta.Disk, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaRegionDisks) (bool, []*beta.Disk, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *beta.Disk, m *MockBetaRegionDisks) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionDisks) (bool, error) - ResizeHook func(context.Context, *meta.Key, *beta.RegionDisksResizeRequest, *MockBetaRegionDisks) error + GetHook func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, *ga.Disk, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockRegionDisks) (bool, []*ga.Disk, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Disk, m *MockRegionDisks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, error) + ResizeHook func(context.Context, *meta.Key, *ga.RegionDisksResizeRequest, *MockRegionDisks) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -5164,10 +5164,10 @@ type MockBetaRegionDisks struct { } // Get returns the object from the mock. -func (m *MockBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) { +func (m *MockRegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5179,28 +5179,28 @@ func (m *MockBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Dis defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToGA() + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaRegionDisks %v not found", key), + Message: fmt.Sprintf("MockRegionDisks %v not found", key), } - glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock in the given region. 
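// ----------------------------------------------------------------------------
// Illustrative aside (editorial sketch, not from the patched gen.go): with
// RegionDisks promoted from the beta surface to GA in this change, the
// interface above now traffics in compute v1 types (*ga.Disk,
// *ga.RegionDisksResizeRequest), and both MockRegionDisks and GCERegionDisks
// satisfy it. A caller written against the interface only changes its request
// types. The SizeGb field is assumed from the compute v1 API; it does not
// appear in this diff.
func exampleResizeRegionDisk(ctx context.Context, disks RegionDisks, key *meta.Key) error {
	// Resize the regional disk identified by key to 200 GB.
	return disks.Resize(ctx, key, &ga.RegionDisksResizeRequest{SizeGb: 200})
}
// ----------------------------------------------------------------------------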
-func (m *MockBetaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) { +func (m *MockRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -5210,31 +5210,31 @@ func (m *MockBetaRegionDisks) List(ctx context.Context, region string, fl *filte if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*beta.Disk + var objs []*ga.Disk for key, obj := range m.Objects { if key.Region != region { continue } - if !fl.Match(obj.ToBeta()) { + if !fl.Match(obj.ToGA()) { continue } - objs = append(objs, obj.ToBeta()) + objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error { +func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5246,32 +5246,32 @@ func (m *MockBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *be defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockBetaRegionDisks %v exists", key), + Message: fmt.Sprintf("MockRegionDisks %v exists", key), } - glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "beta", "disks") - obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "disks", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "disks") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key) m.Objects[*key] = &MockRegionDisksObj{obj} - glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockRegionDisks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -5283,207 +5283,207 @@ func (m *MockBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaRegionDisks %v not found", key), + Message: fmt.Sprintf("MockRegionDisks %v not found", key), } - glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockBetaRegionDisks) Obj(o *beta.Disk) *MockRegionDisksObj { +func (m *MockRegionDisks) Obj(o *ga.Disk) *MockRegionDisksObj { return &MockRegionDisksObj{o} } // Resize is a mock for the corresponding method. -func (m *MockBetaRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *beta.RegionDisksResizeRequest) error { +func (m *MockRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error { if m.ResizeHook != nil { return m.ResizeHook(ctx, key, arg0, m) } return nil } -// GCEBetaRegionDisks is a simplifying adapter for the GCE RegionDisks. -type GCEBetaRegionDisks struct { +// GCERegionDisks is a simplifying adapter for the GCE RegionDisks. +type GCERegionDisks struct { s *Service } // Get the Disk named by key. 
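// ----------------------------------------------------------------------------
// Illustrative aside (editorial sketch, not from the patched gen.go): the hook
// fields documented on MockRegionDisks above let a test intercept calls;
// returning true short-circuits the mock's default in-memory behavior. This
// sketch assumes it sits in the same package as the generated code and that
// meta.RegionalKey is the usual key constructor from the meta package.
func exampleMockRegionDisksGetHook() {
	mock := NewMockRegionDisks(nil, map[meta.Key]*MockRegionDisksObj{})
	mock.GetHook = func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, *ga.Disk, error) {
		// (true, obj, err) intercepts; (false, nil, nil) falls through to the
		// mock's normal map-backed lookup.
		return true, &ga.Disk{Name: key.Name, SizeGb: 100}, nil
	}
	disk, err := mock.Get(context.Background(), meta.RegionalKey("disk-1", "us-central1"))
	fmt.Printf("Get = %+v, %v\n", disk, err)
}
// ----------------------------------------------------------------------------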
-func (g *GCEBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) { - glog.V(5).Infof("GCEBetaRegionDisks.Get(%v, %v): called", ctx, key) +func (g *GCERegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { + klog.V(5).Infof("GCERegionDisks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaRegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCEBetaRegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Beta.RegionDisks.Get(projectID, key.Region, key.Name) + call := g.s.GA.RegionDisks.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaRegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Disk objects. -func (g *GCEBetaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) { - glog.V(5).Infof("GCEBetaRegionDisks.List(%v, %v, %v) called", ctx, region, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") +func (g *GCERegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) { + klog.V(5).Infof("GCERegionDisks.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "RegionDisks", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaRegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) - call := g.s.Beta.RegionDisks.List(projectID, region) + klog.V(5).Infof("GCERegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.GA.RegionDisks.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*beta.Disk - f := func(l *beta.DiskList) error { - glog.V(5).Infof("GCEBetaRegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.Disk + f := func(l *ga.DiskList) error { + klog.V(5).Infof("GCERegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // Insert Disk with key of value obj. -func (g *GCEBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error { - glog.V(5).Infof("GCEBetaRegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj) +func (g *GCERegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { + klog.V(5).Infof("GCERegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Beta.RegionDisks.Insert(projectID, key.Region, obj) + call := g.s.GA.RegionDisks.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Disk referenced by key. 
-func (g *GCEBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaRegionDisks.Delete(%v, %v): called", ctx, key) +func (g *GCERegionDisks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCERegionDisks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaRegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCEBetaRegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.RegionDisks.Delete(projectID, key.Region, key.Name) + call := g.s.GA.RegionDisks.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } -// Resize is a method on GCEBetaRegionDisks. -func (g *GCEBetaRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *beta.RegionDisksResizeRequest) error { - glog.V(5).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): called", ctx, key) +// Resize is a method on GCERegionDisks. 
+func (g *GCERegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error { + klog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Resize", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.RegionDisks.Resize(projectID, key.Region, key.Name, arg0) + call := g.s.GA.RegionDisks.Resize(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -5544,7 +5544,7 @@ type MockFirewalls struct { func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5556,12 +5556,12 @@ func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, e defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -5569,7 +5569,7 @@ func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, e Code: http.StatusNotFound, Message: fmt.Sprintf("MockFirewalls %v not found", key), } - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -5577,7 +5577,7 @@ func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, e func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], %v", ctx, 
fl, len(objs), err) return objs, err } } @@ -5587,7 +5587,7 @@ func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockFirewalls.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -5600,7 +5600,7 @@ func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -5608,7 +5608,7 @@ func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5620,7 +5620,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -5628,7 +5628,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew Code: http.StatusConflict, Message: fmt.Sprintf("MockFirewalls %v exists", key), } - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -5637,7 +5637,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew obj.SelfLink = SelfLink(meta.VersionGA, projectID, "firewalls", key) m.Objects[*key] = &MockFirewallsObj{obj} - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -5645,7 +5645,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -5657,7 +5657,7 @@ func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -5665,12 +5665,12 @@ func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockFirewalls %v not found", key), } - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = nil", ctx, key) return nil } @@ -5694,10 +5694,10 @@ 
type GCEFirewalls struct { // Get the Firewall named by key. func (g *GCEFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) { - glog.V(5).Infof("GCEFirewalls.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEFirewalls.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5707,21 +5707,21 @@ func (g *GCEFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, er Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Firewalls.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEFirewalls.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEFirewalls.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Firewall objects. func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { - glog.V(5).Infof("GCEFirewalls.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEFirewalls.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") rk := &RateLimitKey{ ProjectID: projectID, @@ -5732,30 +5732,30 @@ func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEFirewalls.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEFirewalls.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Firewalls.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Firewall f := func(l *ga.FirewallList) error { - glog.V(5).Infof("GCEFirewalls.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEFirewalls.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -5763,9 +5763,9 @@ func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, // Insert Firewall with key of value obj. 
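// ----------------------------------------------------------------------------
// Illustrative aside (editorial sketch, not from the patched gen.go): every
// generated method above builds a RateLimitKey (project, operation, version,
// service) and gates the API call on RateLimiter.Accept. A custom limiter only
// needs that one method; the Accept signature below is inferred from those
// call sites, so treat it as an assumption.
type acceptAllRateLimiter struct{}

func (acceptAllRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
	// Trace at the same verbosity the generated code uses, then admit every
	// call unconditionally.
	klog.V(5).Infof("acceptAllRateLimiter.Accept(%v, %+v) = nil", ctx, key)
	return nil
}
// ----------------------------------------------------------------------------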
func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error { - glog.V(5).Infof("GCEFirewalls.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEFirewalls.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5775,9 +5775,9 @@ func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewa Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -5786,20 +5786,20 @@ func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewa op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEFirewalls.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Firewall referenced by key. func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEFirewalls.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEFirewalls.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5809,9 +5809,9 @@ func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Firewalls.Delete(projectID, key.Name) @@ -5820,21 +5820,21 @@ func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEFirewalls. 
func (g *GCEFirewalls) Update(ctx context.Context, key *meta.Key, arg0 *ga.Firewall) error { - glog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5844,21 +5844,21 @@ func (g *GCEFirewalls) Update(ctx context.Context, key *meta.Key, arg0 *ga.Firew Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Firewalls.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -5917,7 +5917,7 @@ type MockForwardingRules struct { func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5929,12 +5929,12 @@ func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwa defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -5942,7 +5942,7 @@ func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwa Code: http.StatusNotFound, Message: fmt.Sprintf("MockForwardingRules %v not found", key), } - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -5950,7 +5950,7 @@ func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwa func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, 
len(objs), err) return objs, err } } @@ -5960,7 +5960,7 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -5976,7 +5976,7 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -5984,7 +5984,7 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5996,7 +5996,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -6004,7 +6004,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockForwardingRules %v exists", key), } - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -6013,7 +6013,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "forwardingRules", key) m.Objects[*key] = &MockForwardingRulesObj{obj} - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -6021,7 +6021,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6033,7 +6033,7 @@ func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -6041,12 +6041,12 @@ func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockForwardingRules %v not found", key), } - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) 
return err } delete(m.Objects, *key) - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } @@ -6062,10 +6062,10 @@ type GCEForwardingRules struct { // Get the ForwardingRule named by key. func (g *GCEForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { - glog.V(5).Infof("GCEForwardingRules.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") @@ -6075,21 +6075,21 @@ func (g *GCEForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwar Version: meta.Version("ga"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.ForwardingRules.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all ForwardingRule objects. func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { - glog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, @@ -6100,30 +6100,30 @@ func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.ForwardingRules.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.ForwardingRule f := func(l *ga.ForwardingRuleList) error { - glog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -6131,9 +6131,9 @@ func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter // Insert ForwardingRule with key of value obj. func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { - glog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") @@ -6143,9 +6143,9 @@ func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -6154,20 +6154,20 @@ func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the ForwardingRule referenced by key. 
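// ----------------------------------------------------------------------------
// Illustrative aside (editorial sketch, not from the patched gen.go): the
// glog -> klog rewrite in these hunks is mechanical because k8s.io/klog (v1)
// keeps glog's surface, and klog.V(n) still behaves as a boolean, so it works
// both as a guard and as a leveled logger. A compressed sketch of the same
// verbosity gate, with the higher level checked first so the full dump wins
// when -v=5 is set:
func exampleVerbosityGate(rules []*ga.ForwardingRule) {
	if klog.V(5) {
		// At -v=5 and above, dump the full objects.
		klog.V(5).Infof("List = %+v", rules)
	} else if klog.V(4) {
		// At -v=4, log only a summary count.
		klog.V(4).Infof("List = [%d items]", len(rules))
	}
}
// ----------------------------------------------------------------------------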
func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") @@ -6177,9 +6177,9 @@ func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.ForwardingRules.Delete(projectID, key.Region, key.Name) @@ -6187,12 +6187,12 @@ func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -6251,7 +6251,7 @@ type MockAlphaForwardingRules struct { func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -6263,12 +6263,12 @@ func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alp defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -6276,7 +6276,7 @@ func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alp Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), } - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -6284,7 +6284,7 @@ func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alp func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - 
glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -6294,7 +6294,7 @@ func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl * if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -6310,7 +6310,7 @@ func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl * objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -6318,7 +6318,7 @@ func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl * func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -6330,7 +6330,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -6338,7 +6338,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaForwardingRules %v exists", key), } - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -6347,7 +6347,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "forwardingRules", key) m.Objects[*key] = &MockForwardingRulesObj{obj} - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -6355,7 +6355,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6367,7 +6367,7 @@ func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -6375,12 +6375,12 @@ func (m 
*MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) er Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), } - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } @@ -6396,10 +6396,10 @@ type GCEAlphaForwardingRules struct { // Get the ForwardingRule named by key. func (g *GCEAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { - glog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") @@ -6409,21 +6409,21 @@ func (g *GCEAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alph Version: meta.Version("alpha"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.ForwardingRules.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all ForwardingRule objects. func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, @@ -6434,30 +6434,30 @@ func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *f if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.Alpha.ForwardingRules.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.ForwardingRule f := func(l *alpha.ForwardingRuleList) error { - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -6465,9 +6465,9 @@ func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *f // Insert ForwardingRule with key of value obj. func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { - glog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") @@ -6477,9 +6477,9 @@ func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("alpha"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -6488,20 +6488,20 @@ func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the ForwardingRule referenced by key. 
func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") @@ -6511,9 +6511,9 @@ func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) err Version: meta.Version("alpha"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.ForwardingRules.Delete(projectID, key.Region, key.Name) @@ -6521,12 +6521,12 @@ func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) err op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -6587,7 +6587,7 @@ type MockGlobalForwardingRules struct { func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -6599,12 +6599,12 @@ func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -6612,7 +6612,7 @@ func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga Code: http.StatusNotFound, Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), } - glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -6620,7 +6620,7 @@ func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, 
fl, m); intercept { - glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -6630,7 +6630,7 @@ func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]* if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -6643,7 +6643,7 @@ func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]* objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -6651,7 +6651,7 @@ func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]* func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -6663,7 +6663,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -6671,7 +6671,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o Code: http.StatusConflict, Message: fmt.Sprintf("MockGlobalForwardingRules %v exists", key), } - glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -6680,7 +6680,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o obj.SelfLink = SelfLink(meta.VersionGA, projectID, "forwardingRules", key) m.Objects[*key] = &MockGlobalForwardingRulesObj{obj} - glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -6688,7 +6688,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6700,7 +6700,7 @@ func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) e defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -6708,12 +6708,12 @@ func (m *MockGlobalForwardingRules) Delete(ctx 
context.Context, key *meta.Key) e Code: http.StatusNotFound, Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), } - glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } @@ -6737,10 +6737,10 @@ type GCEGlobalForwardingRules struct { // Get the ForwardingRule named by key. func (g *GCEGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { - glog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") @@ -6750,21 +6750,21 @@ func (g *GCEGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga. Version: meta.Version("ga"), Service: "GlobalForwardingRules", } - glog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.GlobalForwardingRules.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all ForwardingRule objects. func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { - glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, @@ -6775,30 +6775,30 @@ func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*g if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.GlobalForwardingRules.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.ForwardingRule f := func(l *ga.ForwardingRuleList) error { - glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -6806,9 +6806,9 @@ func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*g // Insert ForwardingRule with key of value obj. func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { - glog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") @@ -6818,9 +6818,9 @@ func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, ob Version: meta.Version("ga"), Service: "GlobalForwardingRules", } - glog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -6829,20 +6829,20 @@ func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, ob op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the ForwardingRule referenced by key. 
func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") @@ -6852,9 +6852,9 @@ func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) er Version: meta.Version("ga"), Service: "GlobalForwardingRules", } - glog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.GlobalForwardingRules.Delete(projectID, key.Name) @@ -6863,21 +6863,21 @@ func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) er op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } // SetTarget is a method on GCEGlobalForwardingRules. func (g *GCEGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *ga.TargetReference) error { - glog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") @@ -6887,21 +6887,21 @@ func (g *GCEGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, Version: meta.Version("ga"), Service: "GlobalForwardingRules", } - glog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.GlobalForwardingRules.SetTarget(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -6962,7 +6962,7 @@ type MockHealthChecks struct { func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -6974,12 +6974,12 @@ func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCh defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -6987,7 +6987,7 @@ func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCh Code: http.StatusNotFound, Message: fmt.Sprintf("MockHealthChecks %v not found", key), } - glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -6995,7 +6995,7 @@ func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCh func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -7005,7 +7005,7 @@ func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Health if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -7018,7 +7018,7 @@ func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Health objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -7026,7 +7026,7 @@ func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Health func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -7038,7 +7038,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, 
key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -7046,7 +7046,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He Code: http.StatusConflict, Message: fmt.Sprintf("MockHealthChecks %v exists", key), } - glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -7055,7 +7055,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He obj.SelfLink = SelfLink(meta.VersionGA, projectID, "healthChecks", key) m.Objects[*key] = &MockHealthChecksObj{obj} - glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -7063,7 +7063,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -7075,7 +7075,7 @@ func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -7083,12 +7083,12 @@ func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockHealthChecks %v not found", key), } - glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -7112,10 +7112,10 @@ type GCEHealthChecks struct { // Get the HealthCheck named by key. 
func (g *GCEHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) { - glog.V(5).Infof("GCEHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") @@ -7125,21 +7125,21 @@ func (g *GCEHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthChe Version: meta.Version("ga"), Service: "HealthChecks", } - glog.V(5).Infof("GCEHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.HealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HealthCheck objects. func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { - glog.V(5).Infof("GCEHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -7150,30 +7150,30 @@ func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthC if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.HealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.HealthCheck f := func(l *ga.HealthCheckList) error { - glog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -7181,9 +7181,9 @@ func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthC // Insert HealthCheck with key of value obj. 
func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error { - glog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") @@ -7193,9 +7193,9 @@ func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.Hea Version: meta.Version("ga"), Service: "HealthChecks", } - glog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -7204,20 +7204,20 @@ func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.Hea op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HealthCheck referenced by key. func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") @@ -7227,9 +7227,9 @@ func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "HealthChecks", } - glog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HealthChecks.Delete(projectID, key.Name) @@ -7238,21 +7238,21 @@ func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEHealthChecks. 
func (g *GCEHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HealthCheck) error { - glog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") @@ -7262,21 +7262,21 @@ func (g *GCEHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.He Version: meta.Version("ga"), Service: "HealthChecks", } - glog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -7337,7 +7337,7 @@ type MockBetaHealthChecks struct { func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -7349,12 +7349,12 @@ func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.He defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -7362,7 +7362,7 @@ func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.He Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaHealthChecks %v not found", key), } - glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -7370,7 +7370,7 @@ func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.He func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], 
%v", ctx, fl, len(objs), err) return objs, err } } @@ -7380,7 +7380,7 @@ func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta. if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -7393,7 +7393,7 @@ func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta. objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -7401,7 +7401,7 @@ func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta. func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -7413,7 +7413,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -7421,7 +7421,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaHealthChecks %v exists", key), } - glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -7430,7 +7430,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "healthChecks", key) m.Objects[*key] = &MockHealthChecksObj{obj} - glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -7438,7 +7438,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -7450,7 +7450,7 @@ func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -7458,12 +7458,12 @@ func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaHealthChecks %v not found", key), } - glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } 
delete(m.Objects, *key) - glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -7487,10 +7487,10 @@ type GCEBetaHealthChecks struct { // Get the HealthCheck named by key. func (g *GCEBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) { - glog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") @@ -7500,21 +7500,21 @@ func (g *GCEBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.Hea Version: meta.Version("beta"), Service: "HealthChecks", } - glog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.HealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HealthCheck objects. func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) { - glog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -7525,30 +7525,30 @@ func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.H if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Beta.HealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.HealthCheck f := func(l *beta.HealthCheckList) error { - glog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -7556,9 +7556,9 @@ func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.H // Insert HealthCheck with key of value obj. func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error { - glog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") @@ -7568,9 +7568,9 @@ func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *be Version: meta.Version("beta"), Service: "HealthChecks", } - glog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -7579,20 +7579,20 @@ func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *be op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HealthCheck referenced by key. 
func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") @@ -7602,9 +7602,9 @@ func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("beta"), Service: "HealthChecks", } - glog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.HealthChecks.Delete(projectID, key.Name) @@ -7613,21 +7613,21 @@ func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEBetaHealthChecks. func (g *GCEBetaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *beta.HealthCheck) error { - glog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") @@ -7637,21 +7637,21 @@ func (g *GCEBetaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *b Version: meta.Version("beta"), Service: "HealthChecks", } - glog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.HealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -7712,7 +7712,7 @@ type MockAlphaHealthChecks struct { func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -7724,12 +7724,12 @@ func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha. defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -7737,7 +7737,7 @@ func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha. Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), } - glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -7745,7 +7745,7 @@ func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha. func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -7755,7 +7755,7 @@ func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alph if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -7768,7 +7768,7 @@ func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alph objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -7776,7 +7776,7 @@ func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alph func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -7788,7 +7788,7 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -7796,7 +7796,7 @@ func (m 
*MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaHealthChecks %v exists", key), } - glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -7805,7 +7805,7 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "healthChecks", key) m.Objects[*key] = &MockHealthChecksObj{obj} - glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -7813,7 +7813,7 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -7825,7 +7825,7 @@ func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -7833,12 +7833,12 @@ func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), } - glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -7862,10 +7862,10 @@ type GCEAlphaHealthChecks struct { // Get the HealthCheck named by key. 
func (g *GCEAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) { - glog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") @@ -7875,21 +7875,21 @@ func (g *GCEAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.H Version: meta.Version("alpha"), Service: "HealthChecks", } - glog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.HealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HealthCheck objects. func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { - glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -7900,30 +7900,30 @@ func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Alpha.HealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.HealthCheck f := func(l *alpha.HealthCheckList) error { - glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -7931,9 +7931,9 @@ func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha // Insert HealthCheck with key of value obj. func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error { - glog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") @@ -7943,9 +7943,9 @@ func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *a Version: meta.Version("alpha"), Service: "HealthChecks", } - glog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -7954,20 +7954,20 @@ func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *a op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HealthCheck referenced by key. 
func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") @@ -7977,9 +7977,9 @@ func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("alpha"), Service: "HealthChecks", } - glog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.HealthChecks.Delete(projectID, key.Name) @@ -7988,21 +7988,21 @@ func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEAlphaHealthChecks. func (g *GCEAlphaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *alpha.HealthCheck) error { - glog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") @@ -8012,21 +8012,21 @@ func (g *GCEAlphaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 * Version: meta.Version("alpha"), Service: "HealthChecks", } - glog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.HealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -8087,7 +8087,7 @@ type MockHttpHealthChecks struct { func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8099,12 +8099,12 @@ func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -8112,7 +8112,7 @@ func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -8120,7 +8120,7 @@ func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -8130,7 +8130,7 @@ func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -8143,7 +8143,7 @@ func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -8151,7 +8151,7 @@ func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8163,7 +8163,7 @@ func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -8171,7 +8171,7 @@ func (m *MockHttpHealthChecks) Insert(ctx 
context.Context, key *meta.Key, obj *g Code: http.StatusConflict, Message: fmt.Sprintf("MockHttpHealthChecks %v exists", key), } - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -8180,7 +8180,7 @@ func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g obj.SelfLink = SelfLink(meta.VersionGA, projectID, "httpHealthChecks", key) m.Objects[*key] = &MockHttpHealthChecksObj{obj} - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -8188,7 +8188,7 @@ func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8200,7 +8200,7 @@ func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -8208,12 +8208,12 @@ func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -8237,10 +8237,10 @@ type GCEHttpHealthChecks struct { // Get the HttpHealthCheck named by key. 
func (g *GCEHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) { - glog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8250,21 +8250,21 @@ func (g *GCEHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpH Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.HttpHealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HttpHealthCheck objects. func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -8275,30 +8275,30 @@ func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Htt if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.HttpHealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.HttpHealthCheck f := func(l *ga.HttpHealthCheckList) error { - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -8306,9 +8306,9 @@ func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Htt // Insert HttpHealthCheck with key of value obj. func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error { - glog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8318,9 +8318,9 @@ func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -8329,20 +8329,20 @@ func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HttpHealthCheck referenced by key. 
func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8352,9 +8352,9 @@ func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpHealthChecks.Delete(projectID, key.Name) @@ -8363,21 +8363,21 @@ func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEHttpHealthChecks. func (g *GCEHttpHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpHealthCheck) error { - glog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8387,21 +8387,21 @@ func (g *GCEHttpHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *g Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpHealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -8462,7 +8462,7 @@ type MockHttpsHealthChecks struct { func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8474,12 +8474,12 @@ func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Htt defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -8487,7 +8487,7 @@ func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Htt Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -8495,7 +8495,7 @@ func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Htt func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -8505,7 +8505,7 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -8518,7 +8518,7 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -8526,7 +8526,7 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8538,7 +8538,7 @@ func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -8546,7 +8546,7 @@ func (m 
*MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * Code: http.StatusConflict, Message: fmt.Sprintf("MockHttpsHealthChecks %v exists", key), } - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -8555,7 +8555,7 @@ func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * obj.SelfLink = SelfLink(meta.VersionGA, projectID, "httpsHealthChecks", key) m.Objects[*key] = &MockHttpsHealthChecksObj{obj} - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -8563,7 +8563,7 @@ func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8575,7 +8575,7 @@ func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -8583,12 +8583,12 @@ func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -8612,10 +8612,10 @@ type GCEHttpsHealthChecks struct { // Get the HttpsHealthCheck named by key. 
func (g *GCEHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) { - glog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8625,21 +8625,21 @@ func (g *GCEHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.HttpsHealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HttpsHealthCheck objects. func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -8650,30 +8650,30 @@ func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.HttpsHealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.HttpsHealthCheck f := func(l *ga.HttpsHealthCheckList) error { - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -8681,9 +8681,9 @@ func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht // Insert HttpsHealthCheck with key of value obj. func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error { - glog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8693,9 +8693,9 @@ func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -8704,20 +8704,20 @@ func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HttpsHealthCheck referenced by key. 
func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8727,9 +8727,9 @@ func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpsHealthChecks.Delete(projectID, key.Name) @@ -8738,21 +8738,21 @@ func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEHttpsHealthChecks. func (g *GCEHttpsHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpsHealthCheck) error { - glog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8762,21 +8762,21 @@ func (g *GCEHttpsHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 * Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpsHealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -8843,7 +8843,7 @@ type MockInstanceGroups struct { func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8855,12 +8855,12 @@ func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instan defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -8868,7 +8868,7 @@ func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instan Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstanceGroups %v not found", key), } - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -8876,7 +8876,7 @@ func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instan func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -8886,7 +8886,7 @@ func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -8902,7 +8902,7 @@ func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -8910,7 +8910,7 @@ func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8922,7 +8922,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. 
defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -8930,7 +8930,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. Code: http.StatusConflict, Message: fmt.Sprintf("MockInstanceGroups %v exists", key), } - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -8939,7 +8939,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. obj.SelfLink = SelfLink(meta.VersionGA, projectID, "instanceGroups", key) m.Objects[*key] = &MockInstanceGroupsObj{obj} - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -8947,7 +8947,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8959,7 +8959,7 @@ func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -8967,12 +8967,12 @@ func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstanceGroups %v not found", key), } - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = nil", ctx, key) return nil } @@ -9020,10 +9020,10 @@ type GCEInstanceGroups struct { // Get the InstanceGroup named by key. 
func (g *GCEInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) { - glog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9033,21 +9033,21 @@ func (g *GCEInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instanc Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.InstanceGroups.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEInstanceGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEInstanceGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all InstanceGroup objects. func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { - glog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") rk := &RateLimitKey{ ProjectID: projectID, @@ -9058,30 +9058,30 @@ func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.GA.InstanceGroups.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.InstanceGroup f := func(l *ga.InstanceGroupList) error { - glog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -9089,9 +9089,9 @@ func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) // Insert InstanceGroup with key of value obj. func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error { - glog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9101,9 +9101,9 @@ func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.I Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -9112,20 +9112,20 @@ func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.I op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the InstanceGroup referenced by key. 
func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9135,9 +9135,9 @@ func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.Delete(projectID, key.Zone, key.Name) @@ -9145,21 +9145,21 @@ func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } // AddInstances is a method on GCEInstanceGroups. func (g *GCEInstanceGroups) AddInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error { - glog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9169,30 +9169,30 @@ func (g *GCEInstanceGroups) AddInstances(ctx context.Context, key *meta.Key, arg Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.AddInstances(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) return err } // ListInstances is a method on GCEInstanceGroups. func (g *GCEInstanceGroups) ListInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest, fl *filter.F) ([]*ga.InstanceWithNamedPorts, error) { - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9202,41 +9202,41 @@ func (g *GCEInstanceGroups) ListInstances(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.InstanceGroups.ListInstances(projectID, key.Zone, key.Name, arg0) var all []*ga.InstanceWithNamedPorts f := func(l *ga.InstanceGroupsListInstances) error { - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): page %+v", ctx, key, l) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): page %+v", ctx, key, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, nil, err) + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) } return all, nil } // RemoveInstances is a method on GCEInstanceGroups. 
func (g *GCEInstanceGroups) RemoveInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error { - glog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9246,30 +9246,30 @@ func (g *GCEInstanceGroups) RemoveInstances(ctx context.Context, key *meta.Key, Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.RemoveInstances(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) return err } // SetNamedPorts is a method on GCEInstanceGroups. func (g *GCEInstanceGroups) SetNamedPorts(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error { - glog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9279,21 +9279,21 @@ func (g *GCEInstanceGroups) SetNamedPorts(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.SetNamedPorts(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -9356,7 +9356,7 @@ type MockInstances struct { func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -9368,12 +9368,12 @@ func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, e defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -9381,7 +9381,7 @@ func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, e Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstances %v not found", key), } - glog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -9389,7 +9389,7 @@ func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, e func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -9399,7 +9399,7 @@ func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([] if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -9415,7 +9415,7 @@ func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([] objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -9423,7 +9423,7 @@ func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([] func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -9435,7 +9435,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, 
obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -9443,7 +9443,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta Code: http.StatusConflict, Message: fmt.Sprintf("MockInstances %v exists", key), } - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -9452,7 +9452,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta obj.SelfLink = SelfLink(meta.VersionGA, projectID, "instances", key) m.Objects[*key] = &MockInstancesObj{obj} - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -9460,7 +9460,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -9472,7 +9472,7 @@ func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -9480,12 +9480,12 @@ func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstances %v not found", key), } - glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = nil", ctx, key) return nil } @@ -9517,10 +9517,10 @@ type GCEInstances struct { // Get the Instance named by key. 
func (g *GCEInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) { - glog.V(5).Infof("GCEInstances.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstances.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9530,21 +9530,21 @@ func (g *GCEInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, er Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Instances.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Instance objects. func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { - glog.V(5).Infof("GCEInstances.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEInstances.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") rk := &RateLimitKey{ ProjectID: projectID, @@ -9555,30 +9555,30 @@ func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]* if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.GA.Instances.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Instance f := func(l *ga.InstanceList) error { - glog.V(5).Infof("GCEInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEInstances.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -9586,9 +9586,9 @@ func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]* // Insert Instance with key of value obj. 
func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error { - glog.V(5).Infof("GCEInstances.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEInstances.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9598,9 +9598,9 @@ func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instan Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -9609,20 +9609,20 @@ func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instan op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Instance referenced by key. func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEInstances.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstances.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9632,9 +9632,9 @@ func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Instances.Delete(projectID, key.Zone, key.Name) @@ -9642,21 +9642,21 @@ func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) return err } // AttachDisk is a method on GCEInstances. 
func (g *GCEInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *ga.AttachedDisk) error { - glog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9666,30 +9666,30 @@ func (g *GCEInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *ga.A Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } // DetachDisk is a method on GCEInstances. func (g *GCEInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - glog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9699,21 +9699,21 @@ func (g *GCEInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 strin Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -9778,7 +9778,7 @@ type MockBetaInstances struct { func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -9790,12 +9790,12 @@ func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Insta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -9803,7 +9803,7 @@ func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Insta Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaInstances %v not found", key), } - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -9811,7 +9811,7 @@ func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Insta func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -9821,7 +9821,7 @@ func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -9837,7 +9837,7 @@ func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -9845,7 +9845,7 @@ func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -9857,7 +9857,7 @@ func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -9865,7 +9865,7 @@ func (m *MockBetaInstances) Insert(ctx 
context.Context, key *meta.Key, obj *beta Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaInstances %v exists", key), } - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -9874,7 +9874,7 @@ func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "instances", key) m.Objects[*key] = &MockInstancesObj{obj} - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -9882,7 +9882,7 @@ func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -9894,7 +9894,7 @@ func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -9902,12 +9902,12 @@ func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaInstances %v not found", key), } - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = nil", ctx, key) return nil } @@ -9947,10 +9947,10 @@ type GCEBetaInstances struct { // Get the Instance named by key. 
func (g *GCEBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) { - glog.V(5).Infof("GCEBetaInstances.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaInstances.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -9960,21 +9960,21 @@ func (g *GCEBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instan Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.Instances.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Instance objects. func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { - glog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") rk := &RateLimitKey{ ProjectID: projectID, @@ -9985,30 +9985,30 @@ func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.Beta.Instances.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.Instance f := func(l *beta.InstanceList) error { - glog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -10016,9 +10016,9 @@ func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) // Insert Instance with key of value obj. 
func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error { - glog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -10028,9 +10028,9 @@ func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta. Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -10039,20 +10039,20 @@ func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Instance referenced by key. 
func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -10062,9 +10062,9 @@ func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.Instances.Delete(projectID, key.Zone, key.Name) @@ -10072,21 +10072,21 @@ func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } // AttachDisk is a method on GCEBetaInstances. func (g *GCEBetaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *beta.AttachedDisk) error { - glog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -10096,30 +10096,30 @@ func (g *GCEBetaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 * Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) 
= %+v", ctx, key, err) return err } // DetachDisk is a method on GCEBetaInstances. func (g *GCEBetaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - glog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -10129,30 +10129,30 @@ func (g *GCEBetaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 s Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } // UpdateNetworkInterface is a method on GCEBetaInstances. func (g *GCEBetaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *beta.NetworkInterface) error { - glog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -10162,21 +10162,21 @@ func (g *GCEBetaInstances) UpdateNetworkInterface(ctx context.Context, key *meta Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -10241,7 +10241,7 @@ type MockAlphaInstances struct { func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -10253,12 +10253,12 @@ func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Ins defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -10266,7 +10266,7 @@ func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Ins Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaInstances %v not found", key), } - glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -10274,7 +10274,7 @@ func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Ins func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -10284,7 +10284,7 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -10300,7 +10300,7 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -10308,7 +10308,7 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -10320,7 +10320,7 @@ func (m *MockAlphaInstances) Insert(ctx 
context.Context, key *meta.Key, obj *alp defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -10328,7 +10328,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaInstances %v exists", key), } - glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -10337,7 +10337,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "instances", key) m.Objects[*key] = &MockInstancesObj{obj} - glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -10345,7 +10345,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -10357,7 +10357,7 @@ func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -10365,12 +10365,12 @@ func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaInstances %v not found", key), } - glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = nil", ctx, key) return nil } @@ -10410,10 +10410,10 @@ type GCEAlphaInstances struct { // Get the Instance named by key. 
func (g *GCEAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) { - glog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10423,21 +10423,21 @@ func (g *GCEAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Inst Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.Instances.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Instance objects. func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { - glog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") rk := &RateLimitKey{ ProjectID: projectID, @@ -10448,30 +10448,30 @@ func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.Alpha.Instances.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.Instance f := func(l *alpha.InstanceList) error { - glog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -10479,9 +10479,9 @@ func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) // Insert Instance with key of value obj. func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error { - glog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10491,9 +10491,9 @@ func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alph Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -10502,20 +10502,20 @@ func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alph op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Instance referenced by key. 
func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10525,9 +10525,9 @@ func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.Instances.Delete(projectID, key.Zone, key.Name) @@ -10535,21 +10535,21 @@ func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } // AttachDisk is a method on GCEAlphaInstances. func (g *GCEAlphaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *alpha.AttachedDisk) error { - glog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10559,30 +10559,30 @@ func (g *GCEAlphaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) 
= %+v", ctx, key, err) return err } // DetachDisk is a method on GCEAlphaInstances. func (g *GCEAlphaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - glog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10592,30 +10592,30 @@ func (g *GCEAlphaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } // UpdateNetworkInterface is a method on GCEAlphaInstances. func (g *GCEAlphaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error { - glog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10625,21 +10625,21 @@ func (g *GCEAlphaInstances) UpdateNetworkInterface(ctx context.Context, key *met Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -10707,7 +10707,7 @@ type MockAlphaNetworkEndpointGroups struct { func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -10719,12 +10719,12 @@ func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -10732,7 +10732,7 @@ func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), } - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -10740,7 +10740,7 @@ func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -10750,7 +10750,7 @@ func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -10766,7 +10766,7 @@ func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -10774,7 +10774,7 @@ func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); 
intercept { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -10786,7 +10786,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -10794,7 +10794,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v exists", key), } - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -10803,7 +10803,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "networkEndpointGroups", key) m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj} - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -10811,7 +10811,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -10823,7 +10823,7 @@ func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -10831,12 +10831,12 @@ func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.K Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), } - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) return nil } @@ -10844,7 +10844,7 @@ func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.K func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { if m.AggregatedListHook != nil { if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ 
-10854,7 +10854,7 @@ func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl if m.AggregatedListError != nil { err := *m.AggregatedListError - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, err } @@ -10863,7 +10863,7 @@ func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl res, err := ParseResourceURL(obj.ToAlpha().SelfLink) location := res.Key.Zone if err != nil { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, err } if !fl.Match(obj.ToAlpha()) { @@ -10871,7 +10871,7 @@ func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl } objs[location] = append(objs[location], obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -10911,10 +10911,10 @@ type GCEAlphaNetworkEndpointGroups struct { // Get the NetworkEndpointGroup named by key. func (g *GCEAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -10924,21 +10924,21 @@ func (g *GCEAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all NetworkEndpointGroup objects. 
func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") rk := &RateLimitKey{ ProjectID: projectID, @@ -10949,30 +10949,30 @@ func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, f if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.Alpha.NetworkEndpointGroups.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.NetworkEndpointGroup f := func(l *alpha.NetworkEndpointGroupList) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -10980,9 +10980,9 @@ func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, f // Insert NetworkEndpointGroup with key of value obj. 
func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -10992,9 +10992,9 @@ func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -11003,20 +11003,20 @@ func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the NetworkEndpointGroup referenced by key. 
func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11026,9 +11026,9 @@ func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) @@ -11036,18 +11036,18 @@ func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } // AggregatedList lists all resources of the given type across all locations. 
func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") rk := &RateLimitKey{ @@ -11057,9 +11057,9 @@ func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) return nil, err } @@ -11072,33 +11072,33 @@ func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * all := map[string][]*alpha.NetworkEndpointGroup{} f := func(l *alpha.NetworkEndpointGroupAggregatedList) error { for k, v := range l.Items { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) all[k] = append(all[k], v.NetworkEndpointGroups...) } return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // AttachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
func (g *GCEAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11108,30 +11108,30 @@ func (g *GCEAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Conte Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // DetachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
func (g *GCEAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11141,30 +11141,30 @@ func (g *GCEAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Conte Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // ListNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
func (g *GCEAlphaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*alpha.NetworkEndpointWithHealthStatus, error) { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11174,31 +11174,31 @@ func (g *GCEAlphaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) var all []*alpha.NetworkEndpointWithHealthStatus f := func(l *alpha.NetworkEndpointGroupsListNetworkEndpoints) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) 
= %v, %v", ctx, key, asStr, nil) } return all, nil } @@ -11267,7 +11267,7 @@ type MockBetaNetworkEndpointGroups struct { func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -11279,12 +11279,12 @@ func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -11292,7 +11292,7 @@ func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v not found", key), } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -11300,7 +11300,7 @@ func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -11310,7 +11310,7 @@ func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, f if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -11326,7 +11326,7 @@ func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, f objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -11334,7 +11334,7 @@ func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, f func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -11346,7 +11346,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke defer m.Lock.Unlock() if 
err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -11354,7 +11354,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v exists", key), } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -11363,7 +11363,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "networkEndpointGroups", key) m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj} - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -11371,7 +11371,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -11383,7 +11383,7 @@ func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -11391,12 +11391,12 @@ func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v not found", key), } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) return nil } @@ -11404,7 +11404,7 @@ func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) { if m.AggregatedListHook != nil { if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -11414,7 +11414,7 @@ func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * if m.AggregatedListError != nil { err := *m.AggregatedListError - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, 
err } @@ -11423,7 +11423,7 @@ func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * res, err := ParseResourceURL(obj.ToBeta().SelfLink) location := res.Key.Zone if err != nil { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, err } if !fl.Match(obj.ToBeta()) { @@ -11431,7 +11431,7 @@ func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * } objs[location] = append(objs[location], obj.ToBeta()) } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -11471,10 +11471,10 @@ type GCEBetaNetworkEndpointGroups struct { // Get the NetworkEndpointGroup named by key. func (g *GCEBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11484,21 +11484,21 @@ func (g *GCEBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) ( Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all NetworkEndpointGroup objects. 
func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") rk := &RateLimitKey{ ProjectID: projectID, @@ -11509,30 +11509,30 @@ func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.Beta.NetworkEndpointGroups.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.NetworkEndpointGroup f := func(l *beta.NetworkEndpointGroupList) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -11540,9 +11540,9 @@ func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl // Insert NetworkEndpointGroup with key of value obj. 
func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11552,9 +11552,9 @@ func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -11563,20 +11563,20 @@ func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the NetworkEndpointGroup referenced by key. 
func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11586,9 +11586,9 @@ func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) @@ -11596,18 +11596,18 @@ func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } // AggregatedList lists all resources of the given type across all locations. 
func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") rk := &RateLimitKey{ @@ -11617,9 +11617,9 @@ func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *f Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) return nil, err } @@ -11632,33 +11632,33 @@ func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *f all := map[string][]*beta.NetworkEndpointGroup{} f := func(l *beta.NetworkEndpointGroupAggregatedList) error { for k, v := range l.Items { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) all[k] = append(all[k], v.NetworkEndpointGroups...) } return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // AttachNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
func (g *GCEBetaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsAttachEndpointsRequest) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11668,30 +11668,30 @@ func (g *GCEBetaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Contex Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // DetachNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
func (g *GCEBetaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsDetachEndpointsRequest) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11701,30 +11701,30 @@ func (g *GCEBetaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Contex Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // ListNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
func (g *GCEBetaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*beta.NetworkEndpointWithHealthStatus, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11734,31 +11734,31 @@ func (g *GCEBetaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) var all []*beta.NetworkEndpointWithHealthStatus f := func(l *beta.NetworkEndpointGroupsListNetworkEndpoints) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) 
= %v, %v", ctx, key, asStr, nil) } return all, nil } @@ -11859,7 +11859,7 @@ type MockRegions struct { func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -11871,12 +11871,12 @@ func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -11884,7 +11884,7 @@ func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegions %v not found", key), } - glog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -11892,7 +11892,7 @@ func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -11902,7 +11902,7 @@ func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, err if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockRegions.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockRegions.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -11915,7 +11915,7 @@ func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, err objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -11931,10 +11931,10 @@ type GCERegions struct { // Get the Region named by key. 
func (g *GCERegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) { - glog.V(5).Infof("GCERegions.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegions.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegions.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegions.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") @@ -11944,21 +11944,21 @@ func (g *GCERegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) Version: meta.Version("ga"), Service: "Regions", } - glog.V(5).Infof("GCERegions.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegions.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegions.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegions.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Regions.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERegions.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegions.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Region objects. func (g *GCERegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { - glog.V(5).Infof("GCERegions.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCERegions.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") rk := &RateLimitKey{ ProjectID: projectID, @@ -11969,30 +11969,30 @@ func (g *GCERegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, erro if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCERegions.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCERegions.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Regions.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Region f := func(l *ga.RegionList) error { - glog.V(5).Infof("GCERegions.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCERegions.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCERegions.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERegions.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -12053,7 +12053,7 @@ type MockRoutes struct { func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12065,12 +12065,12 @@ func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -12078,7 +12078,7 @@ func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) Code: http.StatusNotFound, Message: fmt.Sprintf("MockRoutes %v not found", key), } - glog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -12086,7 +12086,7 @@ func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -12096,7 +12096,7 @@ func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockRoutes.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockRoutes.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -12109,7 +12109,7 @@ func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -12117,7 +12117,7 @@ func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, 
%+v) = %v", ctx, key, obj, err) return err } } @@ -12129,7 +12129,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -12137,7 +12137,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e Code: http.StatusConflict, Message: fmt.Sprintf("MockRoutes %v exists", key), } - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -12146,7 +12146,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e obj.SelfLink = SelfLink(meta.VersionGA, projectID, "routes", key) m.Objects[*key] = &MockRoutesObj{obj} - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -12154,7 +12154,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -12166,7 +12166,7 @@ func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -12174,12 +12174,12 @@ func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockRoutes %v not found", key), } - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = nil", ctx, key) return nil } @@ -12195,10 +12195,10 @@ type GCERoutes struct { // Get the Route named by key. 
func (g *GCERoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { - glog.V(5).Infof("GCERoutes.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERoutes.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERoutes.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERoutes.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") @@ -12208,21 +12208,21 @@ func (g *GCERoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { Version: meta.Version("ga"), Service: "Routes", } - glog.V(5).Infof("GCERoutes.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERoutes.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERoutes.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Routes.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERoutes.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERoutes.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Route objects. func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { - glog.V(5).Infof("GCERoutes.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCERoutes.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") rk := &RateLimitKey{ ProjectID: projectID, @@ -12233,30 +12233,30 @@ func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCERoutes.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCERoutes.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Routes.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Route f := func(l *ga.RouteList) error { - glog.V(5).Infof("GCERoutes.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCERoutes.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCERoutes.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERoutes.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -12264,9 +12264,9 @@ func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) // Insert Route with key of value obj. 
func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error { - glog.V(5).Infof("GCERoutes.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCERoutes.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCERoutes.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERoutes.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") @@ -12276,9 +12276,9 @@ func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) er Version: meta.Version("ga"), Service: "Routes", } - glog.V(5).Infof("GCERoutes.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERoutes.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERoutes.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -12287,20 +12287,20 @@ func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) er op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERoutes.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERoutes.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERoutes.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Route referenced by key. func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCERoutes.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERoutes.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERoutes.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERoutes.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") @@ -12310,9 +12310,9 @@ func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Routes", } - glog.V(5).Infof("GCERoutes.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERoutes.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERoutes.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Routes.Delete(projectID, key.Name) @@ -12321,12 +12321,12 @@ func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -12395,7 +12395,7 @@ type MockBetaSecurityPolicies struct { func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - 
glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12407,12 +12407,12 @@ func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*bet defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -12420,7 +12420,7 @@ func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*bet Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaSecurityPolicies %v not found", key), } - glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -12428,7 +12428,7 @@ func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*bet func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -12438,7 +12438,7 @@ func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*b if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -12451,7 +12451,7 @@ func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*b objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -12459,7 +12459,7 @@ func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*b func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -12471,7 +12471,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -12479,7 +12479,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaSecurityPolicies %v exists", key), } - 
glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -12488,7 +12488,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "securityPolicies", key) m.Objects[*key] = &MockSecurityPoliciesObj{obj} - glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -12496,7 +12496,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -12508,7 +12508,7 @@ func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -12516,12 +12516,12 @@ func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) er Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaSecurityPolicies %v not found", key), } - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = nil", ctx, key) return nil } @@ -12577,10 +12577,10 @@ type GCEBetaSecurityPolicies struct { // Get the SecurityPolicy named by key. 
func (g *GCEBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) { - glog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12590,21 +12590,21 @@ func (g *GCEBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.SecurityPolicies.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all SecurityPolicy objects. func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) { - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") rk := &RateLimitKey{ ProjectID: projectID, @@ -12615,30 +12615,30 @@ func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*be if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Beta.SecurityPolicies.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.SecurityPolicy f := func(l *beta.SecurityPolicyList) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -12646,9 +12646,9 @@ func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*be // Insert SecurityPolicy with key of value obj. func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12658,9 +12658,9 @@ func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -12669,20 +12669,20 @@ func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the SecurityPolicy referenced by key. 
func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12692,9 +12692,9 @@ func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) err Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.Delete(projectID, key.Name) @@ -12703,21 +12703,21 @@ func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) err op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } // AddRule is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12727,30 +12727,30 @@ func (g *GCEBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, ar Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.AddRule(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) return err } // GetRule is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (*beta.SecurityPolicyRule, error) { - glog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12760,25 +12760,25 @@ func (g *GCEBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (* Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.SecurityPolicies.GetRule(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Patch is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicy) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12788,30 +12788,30 @@ func (g *GCEBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.Patch(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } // PatchRule is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12821,30 +12821,30 @@ func (g *GCEBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.PatchRule(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) return err } // RemoveRule is a method on GCEBetaSecurityPolicies. 
func (g *GCEBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12854,21 +12854,21 @@ func (g *GCEBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.RemoveRule(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -12927,7 +12927,7 @@ type MockSslCertificates struct { func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12939,12 +12939,12 @@ func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCe defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -12952,7 +12952,7 @@ func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCe Code: http.StatusNotFound, Message: fmt.Sprintf("MockSslCertificates %v not found", key), } - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -12960,7 +12960,7 @@ func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCe func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -12970,7 +12970,7 @@ func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.Ssl if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -12983,7 +12983,7 @@ func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.Ssl objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -12991,7 +12991,7 @@ func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.Ssl func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13003,7 +13003,7 @@ func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -13011,7 +13011,7 @@ func (m *MockSslCertificates) Insert(ctx 
context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockSslCertificates %v exists", key), } - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -13020,7 +13020,7 @@ func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "sslCertificates", key) m.Objects[*key] = &MockSslCertificatesObj{obj} - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -13028,7 +13028,7 @@ func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13040,7 +13040,7 @@ func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -13048,12 +13048,12 @@ func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockSslCertificates %v not found", key), } - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = nil", ctx, key) return nil } @@ -13069,10 +13069,10 @@ type GCESslCertificates struct { // Get the SslCertificate named by key. 
func (g *GCESslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) { - glog.V(5).Infof("GCESslCertificates.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCESslCertificates.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCESslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCESslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") @@ -13082,21 +13082,21 @@ func (g *GCESslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCer Version: meta.Version("ga"), Service: "SslCertificates", } - glog.V(5).Infof("GCESslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCESslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCESslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.SslCertificates.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCESslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCESslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all SslCertificate objects. func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { - glog.V(5).Infof("GCESslCertificates.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCESslCertificates.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") rk := &RateLimitKey{ ProjectID: projectID, @@ -13107,30 +13107,30 @@ func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslC if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCESslCertificates.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCESslCertificates.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.SslCertificates.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.SslCertificate f := func(l *ga.SslCertificateList) error { - glog.V(5).Infof("GCESslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCESslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -13138,9 +13138,9 @@ func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslC // Insert SslCertificate with key of value obj. func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error { - glog.V(5).Infof("GCESslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCESslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCESslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCESslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") @@ -13150,9 +13150,9 @@ func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "SslCertificates", } - glog.V(5).Infof("GCESslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCESslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -13161,20 +13161,20 @@ func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCESslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the SslCertificate referenced by key. 
func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCESslCertificates.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCESslCertificates.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCESslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCESslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") @@ -13184,9 +13184,9 @@ func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "SslCertificates", } - glog.V(5).Infof("GCESslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCESslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCESslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.SslCertificates.Delete(projectID, key.Name) @@ -13195,12 +13195,12 @@ func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -13261,7 +13261,7 @@ type MockTargetHttpProxies struct { func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -13273,12 +13273,12 @@ func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -13286,7 +13286,7 @@ func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -13294,7 +13294,7 @@ func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + 
klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -13304,7 +13304,7 @@ func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -13317,7 +13317,7 @@ func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -13325,7 +13325,7 @@ func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13337,7 +13337,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -13345,7 +13345,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * Code: http.StatusConflict, Message: fmt.Sprintf("MockTargetHttpProxies %v exists", key), } - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -13354,7 +13354,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetHttpProxies", key) m.Objects[*key] = &MockTargetHttpProxiesObj{obj} - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -13362,7 +13362,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13374,7 +13374,7 @@ func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -13382,12 +13382,12 @@ func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = 
%v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) return nil } @@ -13411,10 +13411,10 @@ type GCETargetHttpProxies struct { // Get the TargetHttpProxy named by key. func (g *GCETargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) { - glog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13424,21 +13424,21 @@ func (g *GCETargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Targ Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.TargetHttpProxies.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all TargetHttpProxy objects. func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { - glog.V(5).Infof("GCETargetHttpProxies.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") rk := &RateLimitKey{ ProjectID: projectID, @@ -13449,30 +13449,30 @@ func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.Ta if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCETargetHttpProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.TargetHttpProxies.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.TargetHttpProxy f := func(l *ga.TargetHttpProxyList) error { - glog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -13480,9 +13480,9 @@ func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.Ta // Insert TargetHttpProxy with key of value obj. func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error { - glog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13492,9 +13492,9 @@ func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *g Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -13503,20 +13503,20 @@ func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *g op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the TargetHttpProxy referenced by key. 
func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13526,9 +13526,9 @@ func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpProxies.Delete(projectID, key.Name) @@ -13537,21 +13537,21 @@ func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } // SetUrlMap is a method on GCETargetHttpProxies. func (g *GCETargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { - glog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13561,21 +13561,21 @@ func (g *GCETargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpProxies.SetUrlMap(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -13638,7 +13638,7 @@ type MockTargetHttpsProxies struct { func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -13650,12 +13650,12 @@ func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Ta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -13663,7 +13663,7 @@ func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Ta Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -13671,7 +13671,7 @@ func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Ta func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -13681,7 +13681,7 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -13694,7 +13694,7 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -13702,7 +13702,7 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. 
func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13714,7 +13714,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -13722,7 +13722,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj Code: http.StatusConflict, Message: fmt.Sprintf("MockTargetHttpsProxies %v exists", key), } - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -13731,7 +13731,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetHttpsProxies", key) m.Objects[*key] = &MockTargetHttpsProxiesObj{obj} - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -13739,7 +13739,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13751,7 +13751,7 @@ func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) erro defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -13759,12 +13759,12 @@ func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) erro Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) return nil } @@ -13796,10 +13796,10 @@ type GCETargetHttpsProxies struct { // Get the TargetHttpsProxy named by key. 
func (g *GCETargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) { - glog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13809,21 +13809,21 @@ func (g *GCETargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.TargetHttpsProxies.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all TargetHttpsProxy objects. func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, @@ -13834,30 +13834,30 @@ func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.TargetHttpsProxies.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.TargetHttpsProxy f := func(l *ga.TargetHttpsProxyList) error { - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -13865,9 +13865,9 @@ func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T // Insert TargetHttpsProxy with key of value obj. func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error { - glog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13877,9 +13877,9 @@ func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj * Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -13888,20 +13888,20 @@ func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj * op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the TargetHttpsProxy referenced by key. 
func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13911,9 +13911,9 @@ func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpsProxies.Delete(projectID, key.Name) @@ -13922,21 +13922,21 @@ func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } // SetSslCertificates is a method on GCETargetHttpsProxies. func (g *GCETargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { - glog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13946,30 +13946,30 @@ func (g *GCETargetHttpsProxies) SetSslCertificates(ctx context.Context, key *met Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpsProxies.SetSslCertificates(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) return err } // SetUrlMap is a method on GCETargetHttpsProxies. func (g *GCETargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { - glog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13979,21 +13979,21 @@ func (g *GCETargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpsProxies.SetUrlMap(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -14056,7 +14056,7 @@ type MockTargetPools struct { func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14068,12 +14068,12 @@ func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPoo defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -14081,7 +14081,7 @@ func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPoo Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetPools %v not found", key), } - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -14089,7 +14089,7 @@ func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPoo func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -14099,7 +14099,7 @@ func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -14115,7 +14115,7 @@ func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -14123,7 +14123,7 @@ func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -14135,7 +14135,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Tar defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -14143,7 +14143,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key 
*meta.Key, obj *ga.Tar Code: http.StatusConflict, Message: fmt.Sprintf("MockTargetPools %v exists", key), } - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -14152,7 +14152,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Tar obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetPools", key) m.Objects[*key] = &MockTargetPoolsObj{obj} - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -14160,7 +14160,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Tar func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -14172,7 +14172,7 @@ func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -14180,12 +14180,12 @@ func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetPools %v not found", key), } - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = nil", ctx, key) return nil } @@ -14217,10 +14217,10 @@ type GCETargetPools struct { // Get the TargetPool named by key. func (g *GCETargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) { - glog.V(5).Infof("GCETargetPools.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetPools.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14230,21 +14230,21 @@ func (g *GCETargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.TargetPools.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCETargetPools.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCETargetPools.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all TargetPool objects. 
func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { - glog.V(5).Infof("GCETargetPools.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCETargetPools.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") rk := &RateLimitKey{ ProjectID: projectID, @@ -14255,30 +14255,30 @@ func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCETargetPools.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCETargetPools.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.TargetPools.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.TargetPool f := func(l *ga.TargetPoolList) error { - glog.V(5).Infof("GCETargetPools.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCETargetPools.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -14286,9 +14286,9 @@ func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) // Insert TargetPool with key of value obj. func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error { - glog.V(5).Infof("GCETargetPools.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCETargetPools.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14298,9 +14298,9 @@ func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Targ Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -14309,20 +14309,20 @@ func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Targ op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the TargetPool referenced by key. func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCETargetPools.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetPools.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14332,9 +14332,9 @@ func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetPools.Delete(projectID, key.Region, key.Name) @@ -14342,21 +14342,21 @@ func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } // AddInstance is a method on GCETargetPools. 
func (g *GCETargetPools) AddInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error { - glog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.AddInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.AddInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14366,30 +14366,30 @@ func (g *GCETargetPools) AddInstance(ctx context.Context, key *meta.Key, arg0 *g Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetPools.AddInstance(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) return err } // RemoveInstance is a method on GCETargetPools. func (g *GCETargetPools) RemoveInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error { - glog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14399,21 +14399,21 @@ func (g *GCETargetPools) RemoveInstance(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetPools.RemoveInstance(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -14474,7 +14474,7 @@ type MockUrlMaps struct { func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14486,12 +14486,12 @@ func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -14499,7 +14499,7 @@ func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error Code: http.StatusNotFound, Message: fmt.Sprintf("MockUrlMaps %v not found", key), } - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -14507,7 +14507,7 @@ func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -14517,7 +14517,7 @@ func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, err if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -14530,7 +14530,7 @@ func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, err objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -14538,7 +14538,7 @@ func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, err func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -14550,7 +14550,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -14558,7 +14558,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) Code: http.StatusConflict, Message: fmt.Sprintf("MockUrlMaps %v exists", key), } - 
glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -14567,7 +14567,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) obj.SelfLink = SelfLink(meta.VersionGA, projectID, "urlMaps", key) m.Objects[*key] = &MockUrlMapsObj{obj} - glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -14575,7 +14575,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -14587,7 +14587,7 @@ func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -14595,12 +14595,12 @@ func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockUrlMaps %v not found", key), } - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = nil", ctx, key) return nil } @@ -14624,10 +14624,10 @@ type GCEUrlMaps struct { // Get the UrlMap named by key. func (g *GCEUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) { - glog.V(5).Infof("GCEUrlMaps.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEUrlMaps.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14637,21 +14637,21 @@ func (g *GCEUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.UrlMaps.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all UrlMap objects. 
func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { - glog.V(5).Infof("GCEUrlMaps.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEUrlMaps.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, @@ -14662,30 +14662,30 @@ func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, erro if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEUrlMaps.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.UrlMaps.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.UrlMap f := func(l *ga.UrlMapList) error { - glog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -14693,9 +14693,9 @@ func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, erro // Insert UrlMap with key of value obj. func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error { - glog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14705,9 +14705,9 @@ func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -14716,20 +14716,20 @@ func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the UrlMap referenced by key. func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14739,9 +14739,9 @@ func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.UrlMaps.Delete(projectID, key.Name) @@ -14750,21 +14750,21 @@ func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEUrlMaps. func (g *GCEUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *ga.UrlMap) error { - glog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14774,21 +14774,21 @@ func (g *GCEUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *ga.UrlMap) Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.UrlMaps.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -14839,7 +14839,7 @@ type MockZones struct { func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockZones.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockZones.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14851,12 +14851,12 @@ func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockZones.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockZones.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -14864,7 +14864,7 @@ func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { Code: http.StatusNotFound, Message: fmt.Sprintf("MockZones %v not found", key), } - glog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -14872,7 +14872,7 @@ func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockZones.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockZones.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -14882,7 +14882,7 @@ func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockZones.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockZones.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -14895,7 +14895,7 @@ func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockZones.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockZones.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -14911,10 +14911,10 @@ type GCEZones struct { // Get the Zone named by key. 
func (g *GCEZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { - glog.V(5).Infof("GCEZones.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEZones.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEZones.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEZones.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Zones") @@ -14924,21 +14924,21 @@ func (g *GCEZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { Version: meta.Version("ga"), Service: "Zones", } - glog.V(5).Infof("GCEZones.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEZones.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEZones.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEZones.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Zones.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEZones.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEZones.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Zone objects. func (g *GCEZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { - glog.V(5).Infof("GCEZones.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEZones.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Zones") rk := &RateLimitKey{ ProjectID: projectID, @@ -14949,30 +14949,30 @@ func (g *GCEZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEZones.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEZones.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Zones.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Zone f := func(l *ga.ZoneList) error { - glog.V(5).Infof("GCEZones.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEZones.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEZones.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEZones.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go index 6a85207ed03b4..04d13e0bfd292 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go @@ -169,9 +169,9 @@ var AllServices = []*ServiceInfo{ Object: "Disk", Service: "RegionDisks", Resource: "disks", - version: VersionBeta, + version: VersionGA, keyType: Regional, - serviceType: reflect.TypeOf(&beta.RegionDisksService{}), + serviceType: reflect.TypeOf(&ga.RegionDisksService{}), additionalMethods: []string{ "Resize", }, diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/BUILD.bazel new file mode 100644 index 0000000000000..ed9436f974928 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["mock.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock", + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock", + visibility = ["//visibility:public"], + deps = [ + "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", + "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", + "//vendor/google.golang.org/api/compute/v1:go_default_library", + "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/mock.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/mock.go new file mode 100644 index 0000000000000..0489bf8d4a313 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/mock.go @@ -0,0 +1,640 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mock encapsulates mocks for testing GCE provider functionality. +// These methods are used to override the mock objects' methods in order to +// intercept the standard processing and to add custom logic for test purposes. +// +// // Example usage: +// cloud := cloud.NewMockGCE() +// cloud.MockTargetPools.AddInstanceHook = mock.AddInstanceHook +package mock + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sync" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" + cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +var ( + // InUseError is a shared variable with error code StatusBadRequest for error verification. + InUseError = &googleapi.Error{Code: http.StatusBadRequest, Message: "It's being used by god."} + // InternalServerError is shared variable with error code StatusInternalServerError for error verification. + InternalServerError = &googleapi.Error{Code: http.StatusInternalServerError} + // UnauthorizedErr wraps a Google API error with code StatusForbidden. + UnauthorizedErr = &googleapi.Error{Code: http.StatusForbidden} +) + +// gceObject is an abstraction of all GCE API object in go client +type gceObject interface { + MarshalJSON() ([]byte, error) +} + +// AddInstanceHook mocks adding a Instance to MockTargetPools +func AddInstanceHook(ctx context.Context, key *meta.Key, req *ga.TargetPoolsAddInstanceRequest, m *cloud.MockTargetPools) error { + pool, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in TargetPools", key.String()), + } + } + + for _, instance := range req.Instances { + pool.Instances = append(pool.Instances, instance.Instance) + } + + return nil +} + +// RemoveInstanceHook mocks removing a Instance from MockTargetPools +func RemoveInstanceHook(ctx context.Context, key *meta.Key, req *ga.TargetPoolsRemoveInstanceRequest, m *cloud.MockTargetPools) error { + pool, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in TargetPools", key.String()), + } + } + + for _, instanceToRemove := range req.Instances { + for i, instance := range pool.Instances { + if instanceToRemove.Instance == instance { + // Delete instance from pool.Instances without preserving order + pool.Instances[i] = pool.Instances[len(pool.Instances)-1] + pool.Instances = pool.Instances[:len(pool.Instances)-1] + break + } + } + } + + return nil +} + +func convertAndInsertAlphaForwardingRule(key *meta.Key, obj gceObject, mRules map[meta.Key]*cloud.MockForwardingRulesObj, version meta.Version, projectID string) (bool, error) { + if !key.Valid() { + return true, fmt.Errorf("invalid GCE key (%+v)", key) + } + + if _, ok := mRules[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockForwardingRule %v exists", key), + } + return true, err + } + + enc, err := obj.MarshalJSON() + if err != nil { + return true, err + } + var fwdRule alpha.ForwardingRule + if err := json.Unmarshal(enc, &fwdRule); err != nil { + return true, err + } + // Set the default values for the Alpha fields. 
+ if fwdRule.NetworkTier == "" { + fwdRule.NetworkTier = cloud.NetworkTierDefault.ToGCEValue() + } + + fwdRule.Name = key.Name + if fwdRule.SelfLink == "" { + fwdRule.SelfLink = cloud.SelfLink(version, projectID, "forwardingRules", key) + } + + mRules[*key] = &cloud.MockForwardingRulesObj{Obj: fwdRule} + return true, nil +} + +// InsertFwdRuleHook mocks inserting a ForwardingRule. ForwardingRules are +// expected to default to Premium tier if no NetworkTier is specified. +func InsertFwdRuleHook(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionGA, "forwardingRules") + return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionGA, projectID) +} + +// InsertBetaFwdRuleHook mocks inserting a BetaForwardingRule. +func InsertBetaFwdRuleHook(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "forwardingRules") + return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionBeta, projectID) +} + +// InsertAlphaFwdRuleHook mocks inserting an AlphaForwardingRule. +func InsertAlphaFwdRuleHook(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionAlpha, "forwardingRules") + return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionAlpha, projectID) +} + +// AddressAttributes maps from Address key to a map of Instances +type AddressAttributes struct { + IPCounter int // Used to assign Addresses with no IP a unique IP address +} + +func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta.Key]*cloud.MockAddressesObj, version meta.Version, projectID string, addressAttrs AddressAttributes) (bool, error) { + if !key.Valid() { + return true, fmt.Errorf("invalid GCE key (%+v)", key) + } + + if _, ok := mAddrs[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAddresses %v exists", key), + } + return true, err + } + + enc, err := obj.MarshalJSON() + if err != nil { + return true, err + } + var addr alpha.Address + if err := json.Unmarshal(enc, &addr); err != nil { + return true, err + } + + // Set default address type if not present. + if addr.AddressType == "" { + addr.AddressType = string(cloud.SchemeExternal) + } + + var existingAddresses []*ga.Address + for _, obj := range mAddrs { + existingAddresses = append(existingAddresses, obj.ToGA()) + } + + for _, existingAddr := range existingAddresses { + if addr.Address == existingAddr.Address { + msg := fmt.Sprintf("MockAddresses IP %v in use", addr.Address) + + // When the IP is already in use, this call returns a StatusBadRequest + // if the address is an external address, and StatusConflict if an + // internal address. This is to be consistent with actual GCE API. 
+ errorCode := http.StatusConflict + if addr.AddressType == string(cloud.SchemeExternal) { + errorCode = http.StatusBadRequest + } + + return true, &googleapi.Error{Code: errorCode, Message: msg} + } + } + + // Set default values used in tests + addr.Name = key.Name + if addr.SelfLink == "" { + addr.SelfLink = cloud.SelfLink(version, projectID, "addresses", key) + } + + if addr.Address == "" { + addr.Address = fmt.Sprintf("1.2.3.%d", addressAttrs.IPCounter) + addressAttrs.IPCounter++ + } + + // Set the default values for the Alpha fields. + if addr.NetworkTier == "" { + addr.NetworkTier = cloud.NetworkTierDefault.ToGCEValue() + } + + mAddrs[*key] = &cloud.MockAddressesObj{Obj: addr} + return true, nil +} + +// InsertAddressHook mocks inserting an Address. +func InsertAddressHook(ctx context.Context, key *meta.Key, obj *ga.Address, m *cloud.MockAddresses) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionGA, "addresses") + return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionGA, projectID, m.X.(AddressAttributes)) +} + +// InsertBetaAddressHook mocks inserting a BetaAddress. +func InsertBetaAddressHook(ctx context.Context, key *meta.Key, obj *beta.Address, m *cloud.MockAddresses) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "addresses") + return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionBeta, projectID, m.X.(AddressAttributes)) +} + +// InsertAlphaAddressHook mocks inserting an Address. Addresses are expected to +// default to Premium tier if no NetworkTier is specified. +func InsertAlphaAddressHook(ctx context.Context, key *meta.Key, obj *alpha.Address, m *cloud.MockAlphaAddresses) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "addresses") + return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionAlpha, projectID, m.X.(AddressAttributes)) +} + +// InstanceGroupAttributes maps from InstanceGroup key to a map of Instances +type InstanceGroupAttributes struct { + InstanceMap map[meta.Key]map[string]*ga.InstanceWithNamedPorts + Lock *sync.Mutex +} + +// AddInstances adds a list of Instances passed by InstanceReference +func (igAttrs *InstanceGroupAttributes) AddInstances(key *meta.Key, instanceRefs []*ga.InstanceReference) error { + igAttrs.Lock.Lock() + defer igAttrs.Lock.Unlock() + + instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key] + if !ok { + instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts) + } + + for _, instance := range instanceRefs { + iWithPort := &ga.InstanceWithNamedPorts{ + Instance: instance.Instance, + } + + instancesWithNamedPorts[instance.Instance] = iWithPort + } + + igAttrs.InstanceMap[*key] = instancesWithNamedPorts + return nil +} + +// RemoveInstances removes a list of Instances passed by InstanceReference +func (igAttrs *InstanceGroupAttributes) RemoveInstances(key *meta.Key, instanceRefs []*ga.InstanceReference) error { + igAttrs.Lock.Lock() + defer igAttrs.Lock.Unlock() + + instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key] + if !ok { + instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts) + } + + for _, instanceToRemove := range instanceRefs { + if _, ok := instancesWithNamedPorts[instanceToRemove.Instance]; ok { + delete(instancesWithNamedPorts, instanceToRemove.Instance) + } else { + return &googleapi.Error{ + Code: http.StatusBadRequest, + Message: 
fmt.Sprintf("%s is not a member of %s", instanceToRemove.Instance, key.String()), + } + } + } + + igAttrs.InstanceMap[*key] = instancesWithNamedPorts + return nil +} + +// List gets a list of InstanceWithNamedPorts +func (igAttrs *InstanceGroupAttributes) List(key *meta.Key) []*ga.InstanceWithNamedPorts { + igAttrs.Lock.Lock() + defer igAttrs.Lock.Unlock() + + instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key] + if !ok { + instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts) + } + + var instanceList []*ga.InstanceWithNamedPorts + for _, val := range instancesWithNamedPorts { + instanceList = append(instanceList, val) + } + + return instanceList +} + +// AddInstancesHook mocks adding instances from an InstanceGroup +func AddInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsAddInstancesRequest, m *cloud.MockInstanceGroups) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()), + } + } + + var attrs InstanceGroupAttributes + attrs = m.X.(InstanceGroupAttributes) + attrs.AddInstances(key, req.Instances) + m.X = attrs + return nil +} + +// ListInstancesHook mocks listing instances from an InstanceGroup +func ListInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsListInstancesRequest, filter *filter.F, m *cloud.MockInstanceGroups) ([]*ga.InstanceWithNamedPorts, error) { + _, err := m.Get(ctx, key) + if err != nil { + return nil, &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()), + } + } + + var attrs InstanceGroupAttributes + attrs = m.X.(InstanceGroupAttributes) + instances := attrs.List(key) + + return instances, nil +} + +// RemoveInstancesHook mocks removing instances from an InstanceGroup +func RemoveInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsRemoveInstancesRequest, m *cloud.MockInstanceGroups) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()), + } + } + + var attrs InstanceGroupAttributes + attrs = m.X.(InstanceGroupAttributes) + attrs.RemoveInstances(key, req.Instances) + m.X = attrs + return nil +} + +// UpdateFirewallHook defines the hook for updating a Firewall. It replaces the +// object with the same key in the mock with the updated object. +func UpdateFirewallHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in Firewalls", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "firewalls") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "firewalls", key) + + m.Objects[*key] = &cloud.MockFirewallsObj{Obj: obj} + return nil +} + +// UpdateHealthCheckHook defines the hook for updating a HealthCheck. It +// replaces the object with the same key in the mock with the updated object. 
+func UpdateHealthCheckHook(ctx context.Context, key *meta.Key, obj *ga.HealthCheck, m *cloud.MockHealthChecks) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in HealthChecks", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "healthChecks") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "healthChecks", key) + + m.Objects[*key] = &cloud.MockHealthChecksObj{Obj: obj} + return nil +} + +// UpdateRegionBackendServiceHook defines the hook for updating a Region +// BackendService. It replaces the object with the same key in the mock with +// the updated object. +func UpdateRegionBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockRegionBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in RegionBackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockRegionBackendServicesObj{Obj: obj} + return nil +} + +// UpdateBackendServiceHook defines the hook for updating a BackendService. +// It replaces the object with the same key in the mock with the updated object. +func UpdateBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj} + return nil +} + +// UpdateAlphaBackendServiceHook defines the hook for updating an alpha BackendService. +// It replaces the object with the same key in the mock with the updated object. +func UpdateAlphaBackendServiceHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj} + return nil +} + +// UpdateBetaBackendServiceHook defines the hook for updating a beta BackendService. +// It replaces the object with the same key in the mock with the updated object.
+func UpdateBetaBackendServiceHook(ctx context.Context, key *meta.Key, obj *beta.BackendService, m *cloud.MockBetaBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionBeta, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj} + return nil +} + +// UpdateURLMapHook defines the hook for updating a UrlMap. +// It replaces the object with the same key in the mock with the updated object. +func UpdateURLMapHook(ctx context.Context, key *meta.Key, obj *ga.UrlMap, m *cloud.MockUrlMaps) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in UrlMaps", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "urlMaps") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "urlMaps", key) + + m.Objects[*key] = &cloud.MockUrlMapsObj{Obj: obj} + return nil +} + +// InsertFirewallsUnauthorizedErrHook mocks firewall insertion. A forbidden error is returned. +func InsertFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) (bool, error) { + return true, &googleapi.Error{Code: http.StatusForbidden} +} + +// UpdateFirewallsUnauthorizedErrHook mocks firewall updating. A forbidden error is returned. +func UpdateFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) error { + return &googleapi.Error{Code: http.StatusForbidden} +} + +// DeleteFirewallsUnauthorizedErrHook mocks firewall deletion. A forbidden error is returned. +func DeleteFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, m *cloud.MockFirewalls) (bool, error) { + return true, &googleapi.Error{Code: http.StatusForbidden} +} + +// GetFirewallsUnauthorizedErrHook mocks firewall information retrieval. A forbidden error is returned. +func GetFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, m *cloud.MockFirewalls) (bool, *ga.Firewall, error) { + return true, nil, &googleapi.Error{Code: http.StatusForbidden} +} + +// GetTargetPoolInternalErrHook mocks getting target pool. It returns an internal server error. +func GetTargetPoolInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockTargetPools) (bool, *ga.TargetPool, error) { + return true, nil, InternalServerError +} + +// GetForwardingRulesInternalErrHook mocks getting forwarding rules and returns an internal server error. +func GetForwardingRulesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockForwardingRules) (bool, *ga.ForwardingRule, error) { + return true, nil, InternalServerError +} + +// GetAddressesInternalErrHook mocks getting network address and returns an internal server error. +func GetAddressesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockAddresses) (bool, *ga.Address, error) { + return true, nil, InternalServerError +} + +// GetHTTPHealthChecksInternalErrHook mocks getting http health check and returns an internal server error.
+func GetHTTPHealthChecksInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHttpHealthChecks) (bool, *ga.HttpHealthCheck, error) { + return true, nil, InternalServerError +} + +// InsertTargetPoolsInternalErrHook mocks inserting target pool and returns an internal server error. +func InsertTargetPoolsInternalErrHook(ctx context.Context, key *meta.Key, obj *ga.TargetPool, m *cloud.MockTargetPools) (bool, error) { + return true, InternalServerError +} + +// InsertForwardingRulesInternalErrHook mocks inserting forwarding rule and returns an internal server error. +func InsertForwardingRulesInternalErrHook(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) { + return true, InternalServerError +} + +// DeleteAddressesNotFoundErrHook mocks deleting network address and returns a not found error. +func DeleteAddressesNotFoundErrHook(ctx context.Context, key *meta.Key, m *cloud.MockAddresses) (bool, error) { + return true, &googleapi.Error{Code: http.StatusNotFound} +} + +// DeleteAddressesInternalErrHook mocks deleting address and returns an internal server error. +func DeleteAddressesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockAddresses) (bool, error) { + return true, InternalServerError +} + +// InsertAlphaBackendServiceUnauthorizedErrHook mocks inserting an alpha BackendService and returns a forbidden error. +func InsertAlphaBackendServiceUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) (bool, error) { + return true, UnauthorizedErr +} + +// UpdateAlphaBackendServiceUnauthorizedErrHook mocks updating an alpha BackendService and returns a forbidden error. +func UpdateAlphaBackendServiceUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) error { + return UnauthorizedErr +} + +// GetRegionBackendServicesErrHook mocks getting region backend service and returns an internal server error. +func GetRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, *ga.BackendService, error) { + return true, nil, InternalServerError +} + +// UpdateRegionBackendServicesErrHook mocks updating a region backend service and returns an internal server error. +func UpdateRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, svc *ga.BackendService, m *cloud.MockRegionBackendServices) error { + return InternalServerError +} + +// DeleteRegionBackendServicesErrHook mocks deleting region backend service and returns an internal server error. +func DeleteRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, error) { + return true, InternalServerError +} + +// DeleteRegionBackendServicesInUseErrHook mocks deleting region backend service and returns an InUseError. +func DeleteRegionBackendServicesInUseErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, error) { + return true, InUseError +} + +// GetInstanceGroupInternalErrHook mocks getting instance group and returns an internal server error. +func GetInstanceGroupInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockInstanceGroups) (bool, *ga.InstanceGroup, error) { + return true, nil, InternalServerError +} + +// GetHealthChecksInternalErrHook mocks getting health check and returns an internal server error.
+func GetHealthChecksInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHealthChecks) (bool, *ga.HealthCheck, error) { + return true, nil, InternalServerError +} + +// DeleteHealthChecksInternalErrHook mocks deleting health check and returns an internal server error. +func DeleteHealthChecksInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHealthChecks) (bool, error) { + return true, InternalServerError +} + +// DeleteHealthChecksInuseErrHook mocks deleting health check and returns an in use error. +func DeleteHealthChecksInuseErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHealthChecks) (bool, error) { + return true, InUseError +} + +// DeleteForwardingRuleErrHook mocks deleting forwarding rule and returns an internal server error. +func DeleteForwardingRuleErrHook(ctx context.Context, key *meta.Key, m *cloud.MockForwardingRules) (bool, error) { + return true, InternalServerError +} + +// ListZonesInternalErrHook mocks listing zone and returns an internal server error. +func ListZonesInternalErrHook(ctx context.Context, fl *filter.F, m *cloud.MockZones) (bool, []*ga.Zone, error) { + return true, []*ga.Zone{}, InternalServerError +} + +// DeleteInstanceGroupInternalErrHook mocks deleting instance group and returns an internal server error. +func DeleteInstanceGroupInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockInstanceGroups) (bool, error) { + return true, InternalServerError +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go index 2933fe223b79c..eb45c769e4615 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/golang/glog" + "k8s.io/klog" alpha "google.golang.org/api/compute/v0.alpha" beta "google.golang.org/api/compute/v0.beta" @@ -67,13 +67,13 @@ func (o *gaOperation) isDone(ctx context.Context) (bool, error) { switch o.key.Type() { case meta.Regional: op, err = o.s.GA.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("GA.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) + klog.V(5).Infof("GA.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) case meta.Zonal: op, err = o.s.GA.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("GA.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) + klog.V(5).Infof("GA.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) case meta.Global: op, err = o.s.GA.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("GA.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) + klog.V(5).Infof("GA.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) default: return false, fmt.Errorf("invalid key type: %#v", o.key) } @@ -124,13 +124,13 @@ func (o *alphaOperation) isDone(ctx context.Context) (bool, error) { switch o.key.Type() { case meta.Regional: op, err = o.s.Alpha.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Alpha.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, 
o.key.Region, o.key.Name, op, err, ctx) + klog.V(5).Infof("Alpha.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) case meta.Zonal: op, err = o.s.Alpha.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Alpha.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) + klog.V(5).Infof("Alpha.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) case meta.Global: op, err = o.s.Alpha.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Alpha.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) + klog.V(5).Infof("Alpha.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) default: return false, fmt.Errorf("invalid key type: %#v", o.key) } @@ -181,13 +181,13 @@ func (o *betaOperation) isDone(ctx context.Context) (bool, error) { switch o.key.Type() { case meta.Regional: op, err = o.s.Beta.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Beta.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) + klog.V(5).Infof("Beta.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) case meta.Zonal: op, err = o.s.Beta.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Beta.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) + klog.V(5).Infof("Beta.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) case meta.Global: op, err = o.s.Beta.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Beta.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) + klog.V(5).Infof("Beta.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) default: return false, fmt.Errorf("invalid key type: %#v", o.key) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go index 2f332dfff854a..4d7b4c557f2a8 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/golang/glog" + "k8s.io/klog" alpha "google.golang.org/api/compute/v0.alpha" beta "google.golang.org/api/compute/v0.beta" @@ -69,7 +69,7 @@ func (s *Service) wrapOperation(anyOp interface{}) (operation, error) { func (s *Service) WaitForCompletion(ctx context.Context, genericOp interface{}) error { op, err := s.wrapOperation(genericOp) if err != nil { - glog.Errorf("wrapOperation(%+v) error: %v", genericOp, err) + klog.Errorf("wrapOperation(%+v) error: %v", genericOp, err) return err } @@ -86,18 +86,18 @@ func (s *Service) pollOperation(ctx context.Context, op operation) error { // returning ctx.Err(). 
select { case <-ctx.Done(): - glog.V(5).Infof("op.pollOperation(%v, %v) not completed, poll count = %d, ctx.Err = %v", ctx, op, pollCount, ctx.Err()) + klog.V(5).Infof("op.pollOperation(%v, %v) not completed, poll count = %d, ctx.Err = %v", ctx, op, pollCount, ctx.Err()) return ctx.Err() default: // ctx is not canceled, continue immediately } pollCount++ - glog.V(5).Infof("op.isDone(%v) waiting; op = %v, poll count = %d", ctx, op, pollCount) + klog.V(5).Infof("op.isDone(%v) waiting; op = %v, poll count = %d", ctx, op, pollCount) s.RateLimiter.Accept(ctx, op.rateLimitKey()) done, err := op.isDone(ctx) if err != nil { - glog.V(5).Infof("op.isDone(%v) error; op = %v, poll count = %d, err = %v, retrying", ctx, op, pollCount, err) + klog.V(5).Infof("op.isDone(%v) error; op = %v, poll count = %d, err = %v, retrying", ctx, op, pollCount, err) } if done { @@ -105,6 +105,6 @@ func (s *Service) pollOperation(ctx context.Context, op operation) error { } } - glog.V(5).Infof("op.isDone(%v) complete; op = %v, poll count = %d, op.err = %v", ctx, op, pollCount, op.error()) + klog.V(5).Infof("op.isDone(%v) complete; op = %v, poll count = %d, op.err = %v", ctx, op, pollCount, op.error()) return op.error() } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go index b584986e4fd02..616fe5f547750 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go @@ -30,13 +30,13 @@ import ( gcfg "gopkg.in/gcfg.v1" "cloud.google.com/go/compute/metadata" - "github.com/golang/glog" "golang.org/x/oauth2" "golang.org/x/oauth2/google" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" container "google.golang.org/api/container/v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -49,7 +49,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/controller" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -57,6 +57,7 @@ import ( ) const ( + // ProviderName is the official const representation of the Google Cloud Provider ProviderName = "gce" k8sNodeRouteTag = "k8s-node-route" @@ -97,9 +98,9 @@ type gceObject interface { MarshalJSON() ([]byte, error) } -// GCECloud is an implementation of Interface, LoadBalancer and Instances for Google Compute Engine. -type GCECloud struct { - // ClusterID contains functionality for getting (and initializing) the ingress-uid. Call GCECloud.Initialize() +// Cloud is an implementation of Interface, LoadBalancer and Instances for Google Compute Engine. +type Cloud struct { + // ClusterID contains functionality for getting (and initializing) the ingress-uid. Call Cloud.Initialize() // for the cloudprovider to start watching the configmap. ClusterID ClusterID @@ -145,7 +146,7 @@ type GCECloud struct { // lock to prevent shared resources from being prematurely deleted while the operation is // in progress. sharedResourceLock sync.Mutex - // AlphaFeatureGate gates gce alpha features in GCECloud instance. + // AlphaFeatureGate gates gce alpha features in Cloud instance. // Related wrapper functions that interacts with gce alpha api should examine whether // the corresponding api is enabled. 
// If not enabled, it should return error. @@ -158,6 +159,7 @@ type GCECloud struct { s *cloud.Service } +// ConfigGlobal is the in memory representation of the gce.conf config data // TODO: replace gcfg with json type ConfigGlobal struct { TokenURL string `gcfg:"token-url"` @@ -177,12 +179,12 @@ type ConfigGlobal struct { NodeInstancePrefix string `gcfg:"node-instance-prefix"` Regional bool `gcfg:"regional"` Multizone bool `gcfg:"multizone"` - // ApiEndpoint is the GCE compute API endpoint to use. If this is blank, + // APIEndpoint is the GCE compute API endpoint to use. If this is blank, // then the default endpoint is used. - ApiEndpoint string `gcfg:"api-endpoint"` - // ContainerApiEndpoint is the GCE container API endpoint to use. If this is blank, + APIEndpoint string `gcfg:"api-endpoint"` + // ContainerAPIEndpoint is the GCE container API endpoint to use. If this is blank, // then the default endpoint is used. - ContainerApiEndpoint string `gcfg:"container-api-endpoint"` + ContainerAPIEndpoint string `gcfg:"container-api-endpoint"` // LocalZone specifies the GCE zone that gce cloud client instance is // located in (i.e. where the controller will be running). If this is // blank, then the local zone will be discovered via the metadata server. @@ -197,10 +199,10 @@ type ConfigFile struct { Global ConfigGlobal `gcfg:"global"` } -// CloudConfig includes all the necessary configuration for creating GCECloud +// CloudConfig includes all the necessary configuration for creating Cloud type CloudConfig struct { - ApiEndpoint string - ContainerApiEndpoint string + APIEndpoint string + ContainerAPIEndpoint string ProjectID string NetworkProjectID string Region string @@ -236,22 +238,22 @@ type Services struct { } // ComputeServices returns access to the internal compute services. -func (g *GCECloud) ComputeServices() *Services { +func (g *Cloud) ComputeServices() *Services { return &Services{g.service, g.serviceAlpha, g.serviceBeta} } // Compute returns the generated stubs for the compute API. -func (g *GCECloud) Compute() cloud.Cloud { +func (g *Cloud) Compute() cloud.Cloud { return g.c } // ContainerService returns the container service. -func (g *GCECloud) ContainerService() *container.Service { +func (g *Cloud) ContainerService() *container.Service { return g.containerService } -// newGCECloud creates a new instance of GCECloud. -func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) { +// newGCECloud creates a new instance of Cloud. 
+func newGCECloud(config io.Reader) (gceCloud *Cloud, err error) { var cloudConfig *CloudConfig var configFile *ConfigFile @@ -260,7 +262,7 @@ func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) { if err != nil { return nil, err } - glog.Infof("Using GCE provider config %+v", configFile) + klog.Infof("Using GCE provider config %+v", configFile) } cloudConfig, err = generateCloudConfig(configFile) @@ -273,7 +275,7 @@ func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) { func readConfig(reader io.Reader) (*ConfigFile, error) { cfg := &ConfigFile{} if err := gcfg.FatalOnly(gcfg.ReadInto(cfg, reader)); err != nil { - glog.Errorf("Couldn't read config: %v", err) + klog.Errorf("Couldn't read config: %v", err) return nil, err } return cfg, nil @@ -286,12 +288,12 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err cloudConfig.UseMetadataServer = true cloudConfig.AlphaFeatureGate = NewAlphaFeatureGate([]string{}) if configFile != nil { - if configFile.Global.ApiEndpoint != "" { - cloudConfig.ApiEndpoint = configFile.Global.ApiEndpoint + if configFile.Global.APIEndpoint != "" { + cloudConfig.APIEndpoint = configFile.Global.APIEndpoint } - if configFile.Global.ContainerApiEndpoint != "" { - cloudConfig.ContainerApiEndpoint = configFile.Global.ContainerApiEndpoint + if configFile.Global.ContainerAPIEndpoint != "" { + cloudConfig.ContainerAPIEndpoint = configFile.Global.ContainerAPIEndpoint } if configFile.Global.TokenURL != "" { @@ -377,11 +379,11 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err return cloudConfig, err } -// CreateGCECloud creates a GCECloud object using the specified parameters. +// CreateGCECloud creates a Cloud object using the specified parameters. // If no networkUrl is specified, loads networkName via rest call. // If no tokenSource is specified, uses oauth2.DefaultTokenSource. // If managedZones is nil / empty all zones in the region will be managed. -func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { +func CreateGCECloud(config *CloudConfig) (*Cloud, error) { // Remove any pre-release version and build metadata from the semver, // leaving only the MAJOR.MINOR.PATCH portion. See http://semver.org/. version := strings.TrimLeft(strings.Split(strings.Split(version.Get().GitVersion, "-")[0], "+")[0], "v") @@ -429,10 +431,10 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { // Generate alpha and beta api endpoints based on override v1 api endpoint. 
// For example, // staging API endpoint: https://www.googleapis.com/compute/staging_v1/ - if config.ApiEndpoint != "" { - service.BasePath = fmt.Sprintf("%sprojects/", config.ApiEndpoint) - serviceBeta.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.ApiEndpoint, "v1", "beta", -1)) - serviceAlpha.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.ApiEndpoint, "v1", "alpha", -1)) + if config.APIEndpoint != "" { + service.BasePath = fmt.Sprintf("%sprojects/", config.APIEndpoint) + serviceBeta.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.APIEndpoint, "v1", "beta", -1)) + serviceAlpha.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.APIEndpoint, "v1", "alpha", -1)) } containerService, err := container.New(client) @@ -440,8 +442,8 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { return nil, err } containerService.UserAgent = userAgent - if config.ContainerApiEndpoint != "" { - containerService.BasePath = config.ContainerApiEndpoint + if config.ContainerAPIEndpoint != "" { + containerService.BasePath = config.ContainerAPIEndpoint } tpuService, err := newTPUService(client) @@ -460,17 +462,17 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { if config.NetworkURL != "" { networkURL = config.NetworkURL } else if config.NetworkName != "" { - networkURL = gceNetworkURL(config.ApiEndpoint, netProjID, config.NetworkName) + networkURL = gceNetworkURL(config.APIEndpoint, netProjID, config.NetworkName) } else { // Other consumers may use the cloudprovider without utilizing the wrapped GCE API functions // or functions requiring network/subnetwork URLs (e.g. Kubelet). - glog.Warningf("No network name or URL specified.") + klog.Warningf("No network name or URL specified.") } if config.SubnetworkURL != "" { subnetURL = config.SubnetworkURL } else if config.SubnetworkName != "" { - subnetURL = gceSubnetworkURL(config.ApiEndpoint, netProjID, config.Region, config.SubnetworkName) + subnetURL = gceSubnetworkURL(config.APIEndpoint, netProjID, config.Region, config.SubnetworkName) } else { // Determine the type of network and attempt to discover the correct subnet for AUTO mode. 
// Gracefully fail because kubelet calls CreateGCECloud without any config, and minions @@ -478,20 +480,20 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { if networkName := lastComponent(networkURL); networkName != "" { var n *compute.Network if n, err = getNetwork(service, netProjID, networkName); err != nil { - glog.Warningf("Could not retrieve network %q; err: %v", networkName, err) + klog.Warningf("Could not retrieve network %q; err: %v", networkName, err) } else { switch typeOfNetwork(n) { case netTypeLegacy: - glog.Infof("Network %q is type legacy - no subnetwork", networkName) + klog.Infof("Network %q is type legacy - no subnetwork", networkName) isLegacyNetwork = true case netTypeCustom: - glog.Warningf("Network %q is type custom - cannot auto select a subnetwork", networkName) + klog.Warningf("Network %q is type custom - cannot auto select a subnetwork", networkName) case netTypeAuto: subnetURL, err = determineSubnetURL(service, netProjID, networkName, config.Region) if err != nil { - glog.Warningf("Could not determine subnetwork for network %q and region %v; err: %v", networkName, config.Region, err) + klog.Warningf("Could not determine subnetwork for network %q and region %v; err: %v", networkName, config.Region, err) } else { - glog.Infof("Auto selecting subnetwork %q", subnetURL) + klog.Infof("Auto selecting subnetwork %q", subnetURL) } } } @@ -505,12 +507,12 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { } } if len(config.ManagedZones) > 1 { - glog.Infof("managing multiple zones: %v", config.ManagedZones) + klog.Infof("managing multiple zones: %v", config.ManagedZones) } operationPollRateLimiter := flowcontrol.NewTokenBucketRateLimiter(5, 5) // 5 qps, 5 burst. - gce := &GCECloud{ + gce := &Cloud{ service: service, serviceAlpha: serviceAlpha, serviceBeta: serviceBeta, @@ -550,8 +552,8 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { // SetRateLimiter adds a custom cloud.RateLimiter implementation. // WARNING: Calling this could have unexpected behavior if you have in-flight -// requests. It is best to use this immediately after creating a GCECloud. -func (g *GCECloud) SetRateLimiter(rl cloud.RateLimiter) { +// requests. It is best to use this immediately after creating a Cloud. +func (g *Cloud) SetRateLimiter(rl cloud.RateLimiter) { if rl != nil { g.s.RateLimiter = rl } @@ -586,7 +588,7 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic if isProjectNumber(projID) { projName, err := getProjectID(service, projID) if err != nil { - glog.Warningf("Failed to retrieve project %v while trying to retrieve its name. err %v", projID, err) + klog.Warningf("Failed to retrieve project %v while trying to retrieve its name. err %v", projID, err) } else { projID = projName } @@ -599,7 +601,7 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic if isProjectNumber(netProjID) { netProjName, err := getProjectID(service, netProjID) if err != nil { - glog.Warningf("Failed to retrieve network project %v while trying to retrieve its name. err %v", netProjID, err) + klog.Warningf("Failed to retrieve network project %v while trying to retrieve its name. err %v", netProjID, err) } else { netProjID = netProjName } @@ -610,89 +612,92 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic // Initialize takes in a clientBuilder and spawns a goroutine for watching the clusterid configmap. 
// This must be called before utilizing the funcs of gce.ClusterID -func (gce *GCECloud) Initialize(clientBuilder controller.ControllerClientBuilder) { - gce.clientBuilder = clientBuilder - gce.client = clientBuilder.ClientOrDie("cloud-provider") +func (g *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { + g.clientBuilder = clientBuilder + g.client = clientBuilder.ClientOrDie("cloud-provider") - if gce.OnXPN() { - gce.eventBroadcaster = record.NewBroadcaster() - gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: gce.client.CoreV1().Events("")}) - gce.eventRecorder = gce.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gce-cloudprovider"}) + if g.OnXPN() { + g.eventBroadcaster = record.NewBroadcaster() + g.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: g.client.CoreV1().Events("")}) + g.eventRecorder = g.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "g-cloudprovider"}) } - go gce.watchClusterID() + go g.watchClusterID(stop) } // LoadBalancer returns an implementation of LoadBalancer for Google Compute Engine. -func (gce *GCECloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { - return gce, true +func (g *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { + return g, true } // Instances returns an implementation of Instances for Google Compute Engine. -func (gce *GCECloud) Instances() (cloudprovider.Instances, bool) { - return gce, true +func (g *Cloud) Instances() (cloudprovider.Instances, bool) { + return g, true } // Zones returns an implementation of Zones for Google Compute Engine. -func (gce *GCECloud) Zones() (cloudprovider.Zones, bool) { - return gce, true +func (g *Cloud) Zones() (cloudprovider.Zones, bool) { + return g, true } -func (gce *GCECloud) Clusters() (cloudprovider.Clusters, bool) { - return gce, true +// Clusters returns an implementation of Clusters for Google Compute Engine. +func (g *Cloud) Clusters() (cloudprovider.Clusters, bool) { + return g, true } // Routes returns an implementation of Routes for Google Compute Engine. -func (gce *GCECloud) Routes() (cloudprovider.Routes, bool) { - return gce, true +func (g *Cloud) Routes() (cloudprovider.Routes, bool) { + return g, true } // ProviderName returns the cloud provider ID. -func (gce *GCECloud) ProviderName() string { +func (g *Cloud) ProviderName() string { return ProviderName } // ProjectID returns the ProjectID corresponding to the project this cloud is in. -func (g *GCECloud) ProjectID() string { +func (g *Cloud) ProjectID() string { return g.projectID } // NetworkProjectID returns the ProjectID corresponding to the project this cluster's network is in. 
-func (g *GCECloud) NetworkProjectID() string { +func (g *Cloud) NetworkProjectID() string { return g.networkProjectID } // Region returns the region -func (gce *GCECloud) Region() string { - return gce.region +func (g *Cloud) Region() string { + return g.region } // OnXPN returns true if the cluster is running on a cross project network (XPN) -func (gce *GCECloud) OnXPN() bool { - return gce.onXPN +func (g *Cloud) OnXPN() bool { + return g.onXPN } // NetworkURL returns the network url -func (gce *GCECloud) NetworkURL() string { - return gce.networkURL +func (g *Cloud) NetworkURL() string { + return g.networkURL } // SubnetworkURL returns the subnetwork url -func (gce *GCECloud) SubnetworkURL() string { - return gce.subnetworkURL +func (g *Cloud) SubnetworkURL() string { + return g.subnetworkURL } -func (gce *GCECloud) IsLegacyNetwork() bool { - return gce.isLegacyNetwork +// IsLegacyNetwork returns true if the cluster is still running a legacy network configuration. +func (g *Cloud) IsLegacyNetwork() bool { + return g.isLegacyNetwork } -func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactory) { - glog.Infof("Setting up informers for GCECloud") +// SetInformers sets up the zone handlers we need watching for node changes. +func (g *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { + klog.Infof("Setting up informers for Cloud") nodeInformer := informerFactory.Core().V1().Nodes().Informer() nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { node := obj.(*v1.Node) - gce.updateNodeZones(nil, node) + g.updateNodeZones(nil, node) }, UpdateFunc: func(prev, obj interface{}) { prevNode := prev.(*v1.Node) @@ -701,7 +706,7 @@ func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactor prevNode.Labels[kubeletapis.LabelZoneFailureDomain] { return } - gce.updateNodeZones(prevNode, newNode) + g.updateNodeZones(prevNode, newNode) }, DeleteFunc: func(obj interface{}) { node, isNode := obj.(*v1.Node) @@ -710,46 +715,46 @@ func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactor if !isNode { deletedState, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Received unexpected object: %v", obj) + klog.Errorf("Received unexpected object: %v", obj) return } node, ok = deletedState.Obj.(*v1.Node) if !ok { - glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) + klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) return } } - gce.updateNodeZones(node, nil) + g.updateNodeZones(node, nil) }, }) - gce.nodeInformerSynced = nodeInformer.HasSynced + g.nodeInformerSynced = nodeInformer.HasSynced } -func (gce *GCECloud) updateNodeZones(prevNode, newNode *v1.Node) { - gce.nodeZonesLock.Lock() - defer gce.nodeZonesLock.Unlock() +func (g *Cloud) updateNodeZones(prevNode, newNode *v1.Node) { + g.nodeZonesLock.Lock() + defer g.nodeZonesLock.Unlock() if prevNode != nil { prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] if ok { - gce.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name) - if gce.nodeZones[prevZone].Len() == 0 { - gce.nodeZones[prevZone] = nil + g.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name) + if g.nodeZones[prevZone].Len() == 0 { + g.nodeZones[prevZone] = nil } } } if newNode != nil { newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] if ok { - if gce.nodeZones[newZone] == nil { - gce.nodeZones[newZone] = sets.NewString() + if 
g.nodeZones[newZone] == nil { + g.nodeZones[newZone] = sets.NewString() } - gce.nodeZones[newZone].Insert(newNode.ObjectMeta.Name) + g.nodeZones[newZone].Insert(newNode.ObjectMeta.Name) } } } // HasClusterID returns true if the cluster has a clusterID -func (gce *GCECloud) HasClusterID() bool { +func (g *Cloud) HasClusterID() bool { return true } @@ -760,8 +765,8 @@ func isProjectNumber(idOrNumber string) bool { return err == nil } -// GCECloud implements cloudprovider.Interface. -var _ cloudprovider.Interface = (*GCECloud)(nil) +// Cloud implements cloudprovider.Interface. +var _ cloudprovider.Interface = (*Cloud)(nil) func gceNetworkURL(apiEndpoint, project, network string) string { if apiEndpoint == "" { @@ -866,12 +871,12 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { oauth2.NoContext, compute.CloudPlatformScope, compute.ComputeScope) - glog.Infof("Using DefaultTokenSource %#v", tokenSource) + klog.Infof("Using DefaultTokenSource %#v", tokenSource) if err != nil { return nil, err } } else { - glog.Infof("Using existing Token Source %#v", tokenSource) + klog.Infof("Using existing Token Source %#v", tokenSource) } backoff := wait.Backoff{ @@ -882,7 +887,7 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { } if err := wait.ExponentialBackoff(backoff, func() (bool, error) { if _, err := tokenSource.Token(); err != nil { - glog.Errorf("error fetching initial token: %v", err) + klog.Errorf("error fetching initial token: %v", err) return false, nil } return true, nil @@ -894,19 +899,19 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { } func (manager *gceServiceManager) getProjectsAPIEndpoint() string { - projectsApiEndpoint := gceComputeAPIEndpoint + "projects/" + projectsAPIEndpoint := gceComputeAPIEndpoint + "projects/" if manager.gce.service != nil { - projectsApiEndpoint = manager.gce.service.BasePath + projectsAPIEndpoint = manager.gce.service.BasePath } - return projectsApiEndpoint + return projectsAPIEndpoint } func (manager *gceServiceManager) getProjectsAPIEndpointBeta() string { - projectsApiEndpoint := gceComputeAPIEndpointBeta + "projects/" + projectsAPIEndpoint := gceComputeAPIEndpointBeta + "projects/" if manager.gce.service != nil { - projectsApiEndpoint = manager.gce.serviceBeta.BasePath + projectsAPIEndpoint = manager.gce.serviceBeta.BasePath } - return projectsApiEndpoint + return projectsAPIEndpoint } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go index 449b33a0d21ce..51b9bc5e718f7 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go @@ -22,7 +22,7 @@ import ( compute "google.golang.org/api/compute/v1" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" ) @@ -62,7 +62,7 @@ func (am *addressManager) HoldAddress() (string, error) { // could be reserving another address; therefore, it would need to be deleted. In the normal // case of using a controller address, retrieving the address by name results in the fewest API // calls since it indicates whether a Delete is necessary before Reserve. 
- glog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType) + klog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType) // Get the address in case it was orphaned earlier addr, err := am.svc.GetRegionAddress(am.name, am.region) if err != nil && !isNotFound(err) { @@ -73,20 +73,20 @@ func (am *addressManager) HoldAddress() (string, error) { // If address exists, check if the address had the expected attributes. validationError := am.validateAddress(addr) if validationError == nil { - glog.V(4).Infof("%v: address %q already reserves IP %q Type %q. No further action required.", am.logPrefix, addr.Name, addr.Address, addr.AddressType) + klog.V(4).Infof("%v: address %q already reserves IP %q Type %q. No further action required.", am.logPrefix, addr.Name, addr.Address, addr.AddressType) return addr.Address, nil } - glog.V(2).Infof("%v: deleting existing address because %v", am.logPrefix, validationError) + klog.V(2).Infof("%v: deleting existing address because %v", am.logPrefix, validationError) err := am.svc.DeleteRegionAddress(addr.Name, am.region) if err != nil { if isNotFound(err) { - glog.V(4).Infof("%v: address %q was not found. Ignoring.", am.logPrefix, addr.Name) + klog.V(4).Infof("%v: address %q was not found. Ignoring.", am.logPrefix, addr.Name) } else { return "", err } } else { - glog.V(4).Infof("%v: successfully deleted previous address %q", am.logPrefix, addr.Name) + klog.V(4).Infof("%v: successfully deleted previous address %q", am.logPrefix, addr.Name) } } @@ -96,23 +96,23 @@ func (am *addressManager) HoldAddress() (string, error) { // ReleaseAddress will release the address if it's owned by the controller. func (am *addressManager) ReleaseAddress() error { if !am.tryRelease { - glog.V(4).Infof("%v: not attempting release of address %q.", am.logPrefix, am.targetIP) + klog.V(4).Infof("%v: not attempting release of address %q.", am.logPrefix, am.targetIP) return nil } - glog.V(4).Infof("%v: releasing address %q named %q", am.logPrefix, am.targetIP, am.name) + klog.V(4).Infof("%v: releasing address %q named %q", am.logPrefix, am.targetIP, am.name) // Controller only ever tries to unreserve the address named with the load balancer's name. err := am.svc.DeleteRegionAddress(am.name, am.region) if err != nil { if isNotFound(err) { - glog.Warningf("%v: address %q was not found. Ignoring.", am.logPrefix, am.name) + klog.Warningf("%v: address %q was not found. 
Ignoring.", am.logPrefix, am.name) return nil } return err } - glog.V(4).Infof("%v: successfully released IP %q named %q", am.logPrefix, am.targetIP, am.name) + klog.V(4).Infof("%v: successfully released IP %q named %q", am.logPrefix, am.targetIP, am.name) return nil } @@ -130,7 +130,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) { reserveErr := am.svc.ReserveRegionAddress(newAddr, am.region) if reserveErr == nil { if newAddr.Address != "" { - glog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name) + klog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name) return newAddr.Address, nil } @@ -139,7 +139,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) { return "", err } - glog.V(4).Infof("%v: successfully created address %q which reserved IP %q", am.logPrefix, addr.Name, addr.Address) + klog.V(4).Infof("%v: successfully created address %q which reserved IP %q", am.logPrefix, addr.Name, addr.Address) return addr.Address, nil } else if !isHTTPErrorCode(reserveErr, http.StatusConflict) && !isHTTPErrorCode(reserveErr, http.StatusBadRequest) { // If the IP is already reserved: @@ -169,10 +169,10 @@ func (am *addressManager) ensureAddressReservation() (string, error) { if am.isManagedAddress(addr) { // The address with this name is checked at the beginning of 'HoldAddress()', but for some reason // it was re-created by this point. May be possible that two controllers are running. - glog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP) + klog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP) } else { // If the retrieved address is not named with the loadbalancer name, then the controller does not own it, but will allow use of it. - glog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description) + klog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description) am.tryRelease = false } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go index 2bf3e20b051b8..044258f1b4e35 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go @@ -19,7 +19,7 @@ package gce import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" @@ -42,106 +42,106 @@ func newAddressMetricContextWithVersion(request, region, version string) *metric // Caller is allocated a random IP if they do not specify an ipAddress. If an // ipAddress is specified, it must belong to the current project, eg: an // ephemeral IP associated with a global forwarding rule. 
-func (gce *GCECloud) ReserveGlobalAddress(addr *compute.Address) error { +func (g *Cloud) ReserveGlobalAddress(addr *compute.Address) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("reserve", "") - return mc.Observe(gce.c.GlobalAddresses().Insert(ctx, meta.GlobalKey(addr.Name), addr)) + return mc.Observe(g.c.GlobalAddresses().Insert(ctx, meta.GlobalKey(addr.Name), addr)) } // DeleteGlobalAddress deletes a global address by name. -func (gce *GCECloud) DeleteGlobalAddress(name string) error { +func (g *Cloud) DeleteGlobalAddress(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("delete", "") - return mc.Observe(gce.c.GlobalAddresses().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.GlobalAddresses().Delete(ctx, meta.GlobalKey(name))) } // GetGlobalAddress returns the global address by name. -func (gce *GCECloud) GetGlobalAddress(name string) (*compute.Address, error) { +func (g *Cloud) GetGlobalAddress(name string) (*compute.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("get", "") - v, err := gce.c.GlobalAddresses().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.GlobalAddresses().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // ReserveRegionAddress creates a region address -func (gce *GCECloud) ReserveRegionAddress(addr *compute.Address, region string) error { +func (g *Cloud) ReserveRegionAddress(addr *compute.Address, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("reserve", region) - return mc.Observe(gce.c.Addresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) + return mc.Observe(g.c.Addresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) } // ReserveAlphaRegionAddress creates an Alpha, regional address. -func (gce *GCECloud) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error { +func (g *Cloud) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("reserve", region) - return mc.Observe(gce.c.AlphaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) + return mc.Observe(g.c.AlphaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) } // ReserveBetaRegionAddress creates a beta region address -func (gce *GCECloud) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error { +func (g *Cloud) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("reserve", region) - return mc.Observe(gce.c.BetaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) + return mc.Observe(g.c.BetaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) } // DeleteRegionAddress deletes a region address by name. 
-func (gce *GCECloud) DeleteRegionAddress(name, region string) error { +func (g *Cloud) DeleteRegionAddress(name, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("delete", region) - return mc.Observe(gce.c.Addresses().Delete(ctx, meta.RegionalKey(name, region))) + return mc.Observe(g.c.Addresses().Delete(ctx, meta.RegionalKey(name, region))) } // GetRegionAddress returns the region address by name -func (gce *GCECloud) GetRegionAddress(name, region string) (*compute.Address, error) { +func (g *Cloud) GetRegionAddress(name, region string) (*compute.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("get", region) - v, err := gce.c.Addresses().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.Addresses().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // GetAlphaRegionAddress returns the Alpha, regional address by name. -func (gce *GCECloud) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) { +func (g *Cloud) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("get", region) - v, err := gce.c.AlphaAddresses().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.AlphaAddresses().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // GetBetaRegionAddress returns the beta region address by name -func (gce *GCECloud) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) { +func (g *Cloud) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("get", region) - v, err := gce.c.BetaAddresses().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.BetaAddresses().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // GetRegionAddressByIP returns the regional address matching the given IP address. -func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Address, error) { +func (g *Cloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("list", region) - addrs, err := gce.c.Addresses().List(ctx, region, filter.Regexp("address", ipAddress)) + addrs, err := g.c.Addresses().List(ctx, region, filter.Regexp("address", ipAddress)) mc.Observe(err) if err != nil { @@ -149,7 +149,7 @@ func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Ad } if len(addrs) > 1 { - glog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) + klog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) } for _, addr := range addrs { if addr.Address == ipAddress { @@ -160,12 +160,12 @@ func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Ad } // GetBetaRegionAddressByIP returns the beta regional address matching the given IP address. 
-func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta.Address, error) { +func (g *Cloud) GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("list", region) - addrs, err := gce.c.BetaAddresses().List(ctx, region, filter.Regexp("address", ipAddress)) + addrs, err := g.c.BetaAddresses().List(ctx, region, filter.Regexp("address", ipAddress)) mc.Observe(err) if err != nil { @@ -173,7 +173,7 @@ func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*comput } if len(addrs) > 1 { - glog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) + klog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) } for _, addr := range addrs { if addr.Address == ipAddress { @@ -184,11 +184,11 @@ func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*comput } // TODO(#51665): retire this function once Network Tiers becomes Beta in GCP. -func (gce *GCECloud) getNetworkTierFromAddress(name, region string) (string, error) { - if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { +func (g *Cloud) getNetworkTierFromAddress(name, region string) (string, error) { + if !g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { return cloud.NetworkTierDefault.ToGCEValue(), nil } - addr, err := gce.GetAlphaRegionAddress(name, region) + addr, err := g.GetAlphaRegionAddress(name, region) if err != nil { return handleAlphaNetworkTierGetError(err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go index 0ce698aa98e9b..598f1e336d3f3 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go @@ -21,21 +21,24 @@ import ( ) const ( - // alpha: v1.8 (for Services) + // AlphaFeatureNetworkTiers allows Services backed by a GCP load balancer to choose + // what network tier to use. Currently supports "Standard" and "Premium" (default). // - // Allows Services backed by a GCP load balancer to choose what network - // tier to use. Currently supports "Standard" and "Premium" (default). 
+ // alpha: v1.8 (for Services) AlphaFeatureNetworkTiers = "NetworkTiers" ) +// AlphaFeatureGate contains a mapping of alpha features to whether they are enabled type AlphaFeatureGate struct { features map[string]bool } +// Enabled returns true if the provided alpha feature is enabled func (af *AlphaFeatureGate) Enabled(key string) bool { return af.features[key] } +// NewAlphaFeatureGate marks the provided alpha features as enabled func NewAlphaFeatureGate(features []string) *AlphaFeatureGate { featureMap := make(map[string]bool) for _, name := range features { @@ -44,9 +47,9 @@ func NewAlphaFeatureGate(features []string) *AlphaFeatureGate { return &AlphaFeatureGate{featureMap} } -func (gce *GCECloud) alphaFeatureEnabled(feature string) error { - if !gce.AlphaFeatureGate.Enabled(feature) { - return fmt.Errorf("alpha feature %q is not enabled.", feature) +func (g *Cloud) alphaFeatureEnabled(feature string) error { + if !g.AlphaFeatureGate.Enabled(feature) { + return fmt.Errorf("alpha feature %q is not enabled", feature) } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go index ccc0f44411813..39e632e0795f3 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go @@ -19,12 +19,13 @@ package gce import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" ) +// LoadBalancerType defines a specific type for holding load balancer types (eg. Internal) type LoadBalancerType string const ( @@ -33,23 +34,30 @@ const ( // Currently, only "internal" is supported. ServiceAnnotationLoadBalancerType = "cloud.google.com/load-balancer-type" + // LBTypeInternal is the constant for the official internal type. LBTypeInternal LoadBalancerType = "Internal" + // Deprecating the lowercase spelling of Internal. deprecatedTypeInternalLowerCase LoadBalancerType = "internal" - // ServiceAnnotationInternalBackendShare is annotated on a service with "true" when users + // ServiceAnnotationILBBackendShare is annotated on a service with "true" when users // want to share GCP Backend Services for a set of internal load balancers. // ALPHA feature - this may be removed in a future release. ServiceAnnotationILBBackendShare = "alpha.cloud.google.com/load-balancer-backend-share" + // This annotation did not correctly specify "alpha", so both annotations will be checked. deprecatedServiceAnnotationILBBackendShare = "cloud.google.com/load-balancer-backend-share" // NetworkTierAnnotationKey is annotated on a Service object to indicate which // network tier a GCP LB should use. The valid values are "Standard" and // "Premium" (default). - NetworkTierAnnotationKey = "cloud.google.com/network-tier" + NetworkTierAnnotationKey = "cloud.google.com/network-tier" + + // NetworkTierAnnotationStandard is an annotation to indicate the Service is on the Standard network tier NetworkTierAnnotationStandard = cloud.NetworkTierStandard - NetworkTierAnnotationPremium = cloud.NetworkTierPremium + + // NetworkTierAnnotationPremium is an annotation to indicate the Service is on the Premium network tier + NetworkTierAnnotationPremium = cloud.NetworkTierPremium ) // GetLoadBalancerAnnotationType returns the type of GCP load balancer which should be assembled. 
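As context for the alpha-feature renames in the hunk above, here is a minimal usage sketch (illustrative only, not part of this diff): the helper name exampleNetworkTierGate is hypothetical, and the snippet assumes it lives in the gce package so the identifiers from gce_alpha.go and gce_annotations.go are in scope.

// exampleNetworkTierGate is a hypothetical helper sketching how the renamed
// AlphaFeatureGate and the network-tier annotation constants fit together.
func exampleNetworkTierGate() bool {
	// NewAlphaFeatureGate is normally fed from the cloud config's alpha feature
	// list; here the NetworkTiers feature (alpha, for Services) is enabled explicitly.
	gate := NewAlphaFeatureGate([]string{AlphaFeatureNetworkTiers})

	// Only while this gate reports the feature as enabled can a Service request
	// the "Standard" tier via the NetworkTierAnnotationKey
	// ("cloud.google.com/network-tier") annotation; "Premium" remains the default.
	return gate.Enabled(AlphaFeatureNetworkTiers)
}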
@@ -82,7 +90,7 @@ func GetLoadBalancerAnnotationBackendShare(service *v1.Service) bool { // Check for deprecated annotation key if l, exists := service.Annotations[deprecatedServiceAnnotationILBBackendShare]; exists && l == "true" { - glog.Warningf("Annotation %q is deprecated and replaced with an alpha-specific key: %q", deprecatedServiceAnnotationILBBackendShare, ServiceAnnotationILBBackendShare) + klog.Warningf("Annotation %q is deprecated and replaced with an alpha-specific key: %q", deprecatedServiceAnnotationILBBackendShare, ServiceAnnotationILBBackendShare) return true } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go index 23dc3bf1cb3c5..6560c91260681 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go @@ -35,201 +35,201 @@ func newBackendServiceMetricContextWithVersion(request, region, version string) } // GetGlobalBackendService retrieves a backend by name. -func (gce *GCECloud) GetGlobalBackendService(name string) (*compute.BackendService, error) { +func (g *Cloud) GetGlobalBackendService(name string) (*compute.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("get", "") - v, err := gce.c.BackendServices().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.BackendServices().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // GetBetaGlobalBackendService retrieves beta backend by name. -func (gce *GCECloud) GetBetaGlobalBackendService(name string) (*computebeta.BackendService, error) { +func (g *Cloud) GetBetaGlobalBackendService(name string) (*computebeta.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("get", "", computeBetaVersion) - v, err := gce.c.BetaBackendServices().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.BetaBackendServices().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // GetAlphaGlobalBackendService retrieves alpha backend by name. -func (gce *GCECloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) { +func (g *Cloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("get", "", computeAlphaVersion) - v, err := gce.c.AlphaBackendServices().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.AlphaBackendServices().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // UpdateGlobalBackendService applies the given BackendService as an update to // an existing service. -func (gce *GCECloud) UpdateGlobalBackendService(bg *compute.BackendService) error { +func (g *Cloud) UpdateGlobalBackendService(bg *compute.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("update", "") - return mc.Observe(gce.c.BackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.BackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) } // UpdateBetaGlobalBackendService applies the given beta BackendService as an // update to an existing service. 
-func (gce *GCECloud) UpdateBetaGlobalBackendService(bg *computebeta.BackendService) error { +func (g *Cloud) UpdateBetaGlobalBackendService(bg *computebeta.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("update", "", computeBetaVersion) - return mc.Observe(gce.c.BetaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.BetaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) } // UpdateAlphaGlobalBackendService applies the given alpha BackendService as an // update to an existing service. -func (gce *GCECloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error { +func (g *Cloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("update", "", computeAlphaVersion) - return mc.Observe(gce.c.AlphaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.AlphaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) } // DeleteGlobalBackendService deletes the given BackendService by name. -func (gce *GCECloud) DeleteGlobalBackendService(name string) error { +func (g *Cloud) DeleteGlobalBackendService(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("delete", "") - return mc.Observe(gce.c.BackendServices().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.BackendServices().Delete(ctx, meta.GlobalKey(name))) } // CreateGlobalBackendService creates the given BackendService. -func (gce *GCECloud) CreateGlobalBackendService(bg *compute.BackendService) error { +func (g *Cloud) CreateGlobalBackendService(bg *compute.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("create", "") - return mc.Observe(gce.c.BackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.BackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) } // CreateBetaGlobalBackendService creates the given beta BackendService. -func (gce *GCECloud) CreateBetaGlobalBackendService(bg *computebeta.BackendService) error { +func (g *Cloud) CreateBetaGlobalBackendService(bg *computebeta.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("create", "", computeBetaVersion) - return mc.Observe(gce.c.BetaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.BetaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) } // CreateAlphaGlobalBackendService creates the given alpha BackendService. -func (gce *GCECloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error { +func (g *Cloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("create", "", computeAlphaVersion) - return mc.Observe(gce.c.AlphaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.AlphaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) } // ListGlobalBackendServices lists all backend services in the project. 
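// A short sketch, not part of the diff, of the read-modify-update cycle these
// wrappers support. Only GetGlobalBackendService and UpdateGlobalBackendService
// (whose signatures appear in this file) are used; the helper name and the
// field being mutated are illustrative.
package example

import (
	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

// setBackendServiceTimeout fetches a global backend service, changes one field
// and writes it back. An initialized *gce.Cloud is assumed.
func setBackendServiceTimeout(g *gce.Cloud, name string, seconds int64) error {
	bs, err := g.GetGlobalBackendService(name)
	if err != nil {
		return err
	}
	bs.TimeoutSec = seconds
	return g.UpdateGlobalBackendService(bs)
}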
-func (gce *GCECloud) ListGlobalBackendServices() ([]*compute.BackendService, error) { +func (g *Cloud) ListGlobalBackendServices() ([]*compute.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("list", "") - v, err := gce.c.BackendServices().List(ctx, filter.None) + v, err := g.c.BackendServices().List(ctx, filter.None) return v, mc.Observe(err) } // GetGlobalBackendServiceHealth returns the health of the BackendService // identified by the given name, in the given instanceGroup. The // instanceGroupLink is the fully qualified self link of an instance group. -func (gce *GCECloud) GetGlobalBackendServiceHealth(name string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) { +func (g *Cloud) GetGlobalBackendServiceHealth(name string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("get_health", "") groupRef := &compute.ResourceGroupReference{Group: instanceGroupLink} - v, err := gce.c.BackendServices().GetHealth(ctx, meta.GlobalKey(name), groupRef) + v, err := g.c.BackendServices().GetHealth(ctx, meta.GlobalKey(name), groupRef) return v, mc.Observe(err) } // GetRegionBackendService retrieves a backend by name. -func (gce *GCECloud) GetRegionBackendService(name, region string) (*compute.BackendService, error) { +func (g *Cloud) GetRegionBackendService(name, region string) (*compute.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("get", region) - v, err := gce.c.RegionBackendServices().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.RegionBackendServices().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // UpdateRegionBackendService applies the given BackendService as an update to // an existing service. -func (gce *GCECloud) UpdateRegionBackendService(bg *compute.BackendService, region string) error { +func (g *Cloud) UpdateRegionBackendService(bg *compute.BackendService, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("update", region) - return mc.Observe(gce.c.RegionBackendServices().Update(ctx, meta.RegionalKey(bg.Name, region), bg)) + return mc.Observe(g.c.RegionBackendServices().Update(ctx, meta.RegionalKey(bg.Name, region), bg)) } // DeleteRegionBackendService deletes the given BackendService by name. -func (gce *GCECloud) DeleteRegionBackendService(name, region string) error { +func (g *Cloud) DeleteRegionBackendService(name, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("delete", region) - return mc.Observe(gce.c.RegionBackendServices().Delete(ctx, meta.RegionalKey(name, region))) + return mc.Observe(g.c.RegionBackendServices().Delete(ctx, meta.RegionalKey(name, region))) } // CreateRegionBackendService creates the given BackendService. 
-func (gce *GCECloud) CreateRegionBackendService(bg *compute.BackendService, region string) error { +func (g *Cloud) CreateRegionBackendService(bg *compute.BackendService, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("create", region) - return mc.Observe(gce.c.RegionBackendServices().Insert(ctx, meta.RegionalKey(bg.Name, region), bg)) + return mc.Observe(g.c.RegionBackendServices().Insert(ctx, meta.RegionalKey(bg.Name, region), bg)) } // ListRegionBackendServices lists all backend services in the project. -func (gce *GCECloud) ListRegionBackendServices(region string) ([]*compute.BackendService, error) { +func (g *Cloud) ListRegionBackendServices(region string) ([]*compute.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("list", region) - v, err := gce.c.RegionBackendServices().List(ctx, region, filter.None) + v, err := g.c.RegionBackendServices().List(ctx, region, filter.None) return v, mc.Observe(err) } // GetRegionalBackendServiceHealth returns the health of the BackendService // identified by the given name, in the given instanceGroup. The // instanceGroupLink is the fully qualified self link of an instance group. -func (gce *GCECloud) GetRegionalBackendServiceHealth(name, region string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) { +func (g *Cloud) GetRegionalBackendServiceHealth(name, region string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("get_health", region) ref := &compute.ResourceGroupReference{Group: instanceGroupLink} - v, err := gce.c.RegionBackendServices().GetHealth(ctx, meta.RegionalKey(name, region), ref) + v, err := g.c.RegionBackendServices().GetHealth(ctx, meta.RegionalKey(name, region), ref) return v, mc.Observe(err) } // SetSecurityPolicyForBetaGlobalBackendService sets the given // SecurityPolicyReference for the BackendService identified by the given name. -func (gce *GCECloud) SetSecurityPolicyForBetaGlobalBackendService(backendServiceName string, securityPolicyReference *computebeta.SecurityPolicyReference) error { +func (g *Cloud) SetSecurityPolicyForBetaGlobalBackendService(backendServiceName string, securityPolicyReference *computebeta.SecurityPolicyReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("set_security_policy", "", computeBetaVersion) - return mc.Observe(gce.c.BetaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference)) + return mc.Observe(g.c.BetaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference)) } // SetSecurityPolicyForAlphaGlobalBackendService sets the given // SecurityPolicyReference for the BackendService identified by the given name. 
-func (gce *GCECloud) SetSecurityPolicyForAlphaGlobalBackendService(backendServiceName string, securityPolicyReference *computealpha.SecurityPolicyReference) error { +func (g *Cloud) SetSecurityPolicyForAlphaGlobalBackendService(backendServiceName string, securityPolicyReference *computealpha.SecurityPolicyReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("set_security_policy", "", computeAlphaVersion) - return mc.Observe(gce.c.AlphaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference)) + return mc.Observe(g.c.AlphaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference)) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go index 3b6614f816f30..5153f067e8a19 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go @@ -29,43 +29,43 @@ func newCertMetricContext(request string) *metricContext { } // GetSslCertificate returns the SslCertificate by name. -func (gce *GCECloud) GetSslCertificate(name string) (*compute.SslCertificate, error) { +func (g *Cloud) GetSslCertificate(name string) (*compute.SslCertificate, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newCertMetricContext("get") - v, err := gce.c.SslCertificates().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.SslCertificates().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // CreateSslCertificate creates and returns a SslCertificate. -func (gce *GCECloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error) { +func (g *Cloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newCertMetricContext("create") - err := gce.c.SslCertificates().Insert(ctx, meta.GlobalKey(sslCerts.Name), sslCerts) + err := g.c.SslCertificates().Insert(ctx, meta.GlobalKey(sslCerts.Name), sslCerts) if err != nil { return nil, mc.Observe(err) } - return gce.GetSslCertificate(sslCerts.Name) + return g.GetSslCertificate(sslCerts.Name) } // DeleteSslCertificate deletes the SslCertificate by name. -func (gce *GCECloud) DeleteSslCertificate(name string) error { +func (g *Cloud) DeleteSslCertificate(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newCertMetricContext("delete") - return mc.Observe(gce.c.SslCertificates().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.SslCertificates().Delete(ctx, meta.GlobalKey(name))) } // ListSslCertificates lists all SslCertificates in the project. 
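// A sketch, not from the diff, of a typical use of the certificate wrappers:
// reuse an existing SslCertificate if present, otherwise upload a new one.
// Only GetSslCertificate and CreateSslCertificate (declared above) are used;
// the helper name and PEM arguments are placeholders.
package example

import (
	compute "google.golang.org/api/compute/v1"
	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

func ensureCert(g *gce.Cloud, name, certPEM, keyPEM string) (*compute.SslCertificate, error) {
	// GetSslCertificate returns an error when the certificate does not exist,
	// so a nil error means we can reuse what is already there.
	if existing, err := g.GetSslCertificate(name); err == nil && existing != nil {
		return existing, nil
	}
	// Otherwise upload it; CreateSslCertificate re-reads the object by name.
	return g.CreateSslCertificate(&compute.SslCertificate{
		Name:        name,
		Certificate: certPEM,
		PrivateKey:  keyPEM,
	})
}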
-func (gce *GCECloud) ListSslCertificates() ([]*compute.SslCertificate, error) { +func (g *Cloud) ListSslCertificates() ([]*compute.SslCertificate, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newCertMetricContext("list") - v, err := gce.c.SslCertificates().List(ctx, filter.None) + v, err := g.c.SslCertificates().List(ctx, filter.None) return v, mc.Observe(err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go index 46b4ff4f6a0c3..2f40167788d84 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go @@ -25,7 +25,6 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -33,22 +32,31 @@ import ( "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + "k8s.io/klog" ) const ( - // Key used to persist UIDs to configmaps. + // UIDConfigMapName is the Key used to persist UIDs to configmaps. UIDConfigMapName = "ingress-uid" - // Namespace which contains the above config map + + // UIDNamespace is the namespace which contains the above config map UIDNamespace = metav1.NamespaceSystem - // Data keys for the specific ids - UIDCluster = "uid" - UIDProvider = "provider-uid" + + // UIDCluster is the data keys for looking up the clusters UID + UIDCluster = "uid" + + // UIDProvider is the data keys for looking up the providers UID + UIDProvider = "provider-uid" + + // UIDLengthBytes is the length of a UID UIDLengthBytes = 8 + // Frequency of the updateFunc event handler being called // This does not actually query the apiserver for current state - the local cache value is used. 
updateFuncFrequency = 10 * time.Minute ) +// ClusterID is the struct for maintaining information about this cluster's ID type ClusterID struct { idLock sync.RWMutex client clientset.Interface @@ -59,17 +67,17 @@ type ClusterID struct { } // Continually watches for changes to the cluster id config map -func (gce *GCECloud) watchClusterID() { - gce.ClusterID = ClusterID{ +func (g *Cloud) watchClusterID(stop <-chan struct{}) { + g.ClusterID = ClusterID{ cfgMapKey: fmt.Sprintf("%v/%v", UIDNamespace, UIDConfigMapName), - client: gce.client, + client: g.client, } mapEventHandler := cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { m, ok := obj.(*v1.ConfigMap) if !ok || m == nil { - glog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", obj, ok) + klog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", obj, ok) return } if m.Namespace != UIDNamespace || @@ -77,13 +85,13 @@ func (gce *GCECloud) watchClusterID() { return } - glog.V(4).Infof("Observed new configmap for clusteriD: %v, %v; setting local values", m.Name, m.Data) - gce.ClusterID.update(m) + klog.V(4).Infof("Observed new configmap for clusteriD: %v, %v; setting local values", m.Name, m.Data) + g.ClusterID.update(m) }, UpdateFunc: func(old, cur interface{}) { m, ok := cur.(*v1.ConfigMap) if !ok || m == nil { - glog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", cur, ok) + klog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", cur, ok) return } @@ -96,16 +104,16 @@ func (gce *GCECloud) watchClusterID() { return } - glog.V(4).Infof("Observed updated configmap for clusteriD %v, %v; setting local values", m.Name, m.Data) - gce.ClusterID.update(m) + klog.V(4).Infof("Observed updated configmap for clusteriD %v, %v; setting local values", m.Name, m.Data) + g.ClusterID.update(m) }, } - listerWatcher := cache.NewListWatchFromClient(gce.ClusterID.client.CoreV1().RESTClient(), "configmaps", UIDNamespace, fields.Everything()) + listerWatcher := cache.NewListWatchFromClient(g.ClusterID.client.CoreV1().RESTClient(), "configmaps", UIDNamespace, fields.Everything()) var controller cache.Controller - gce.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler) + g.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler) - controller.Run(nil) + controller.Run(stop) } // GetID returns the id which is unique to this cluster @@ -131,9 +139,9 @@ func (ci *ClusterID) GetID() (string, error) { return *ci.clusterID, nil } -// GetFederationId returns the id which could represent the entire Federation +// GetFederationID returns the id which could represent the entire Federation // or just the cluster if not federated. -func (ci *ClusterID) GetFederationId() (string, bool, error) { +func (ci *ClusterID) GetFederationID() (string, bool, error) { if err := ci.getOrInitialize(); err != nil { return "", false, err } @@ -141,7 +149,7 @@ func (ci *ClusterID) GetFederationId() (string, bool, error) { ci.idLock.RLock() defer ci.idLock.RUnlock() if ci.clusterID == nil { - return "", false, errors.New("Could not retrieve cluster id") + return "", false, errors.New("could not retrieve cluster id") } // If provider ID is not set, return false @@ -157,7 +165,7 @@ func (ci *ClusterID) GetFederationId() (string, bool, error) { // before the watch has begun. 
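// A sketch, not part of the diff, of reading the cluster UID once the
// ConfigMap watch is running (i.e. after the provider has been initialized and
// watchClusterID has observed the ingress-uid ConfigMap). Only GetID and
// GetFederationID, shown in this file, are used; the logging helper is
// illustrative.
package example

import (
	"k8s.io/klog"
	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

func logClusterIdentity(g *gce.Cloud) {
	id, err := g.ClusterID.GetID()
	if err != nil {
		// Before the watch has synced (or Initialize has run) this errors out.
		klog.Warningf("cluster ID not available yet: %v", err)
		return
	}
	// The bool is false when no provider UID has been stored in the ConfigMap.
	fedID, hasProviderUID, err := g.ClusterID.GetFederationID()
	if err != nil {
		klog.Warningf("federation ID not available: %v", err)
		return
	}
	klog.Infof("cluster id=%q federation id=%q (provider uid set=%v)", id, fedID, hasProviderUID)
}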
func (ci *ClusterID) getOrInitialize() error { if ci.store == nil { - return errors.New("GCECloud.ClusterID is not ready. Call Initialize() before using.") + return errors.New("Cloud.ClusterID is not ready. Call Initialize() before using") } if ci.clusterID != nil { @@ -172,12 +180,12 @@ func (ci *ClusterID) getOrInitialize() error { } // The configmap does not exist - let's try creating one. - newId, err := makeUID() + newID, err := makeUID() if err != nil { return err } - glog.V(4).Infof("Creating clusteriD: %v", newId) + klog.V(4).Infof("Creating clusteriD: %v", newID) cfg := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: UIDConfigMapName, @@ -185,16 +193,16 @@ func (ci *ClusterID) getOrInitialize() error { }, } cfg.Data = map[string]string{ - UIDCluster: newId, - UIDProvider: newId, + UIDCluster: newID, + UIDProvider: newID, } if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(cfg); err != nil { - glog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) + klog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) return err } - glog.V(2).Infof("Created a config map containing clusteriD: %v", newId) + klog.V(2).Infof("Created a config map containing clusteriD: %v", newID) ci.update(cfg) return nil } @@ -211,7 +219,7 @@ func (ci *ClusterID) getConfigMap() (bool, error) { m, ok := item.(*v1.ConfigMap) if !ok || m == nil { err = fmt.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", item, ok) - glog.Error(err) + klog.Error(err) return false, err } ci.update(m) @@ -224,8 +232,8 @@ func (ci *ClusterID) update(m *v1.ConfigMap) { if clusterID, exists := m.Data[UIDCluster]; exists { ci.clusterID = &clusterID } - if provId, exists := m.Data[UIDProvider]; exists { - ci.providerID = &provId + if provID, exists := m.Data[UIDProvider]; exists { + ci.providerID = &provID } } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go index 63b4bbeb69d4c..379f5396a253f 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go @@ -18,22 +18,22 @@ package gce import ( "context" - "errors" "fmt" - "github.com/golang/glog" - container "google.golang.org/api/container/v1" + "google.golang.org/api/container/v1" + "k8s.io/klog" ) func newClustersMetricContext(request, zone string) *metricContext { return newGenericMetricContext("clusters", request, unusedMetricLabel, zone, computeV1Version) } -func (gce *GCECloud) ListClusters(ctx context.Context) ([]string, error) { +// ListClusters will return a list of cluster names for the associated project +func (g *Cloud) ListClusters(ctx context.Context) ([]string, error) { allClusters := []string{} - for _, zone := range gce.managedZones { - clusters, err := gce.listClustersInZone(zone) + for _, zone := range g.managedZones { + clusters, err := g.listClustersInZone(zone) if err != nil { return nil, err } @@ -44,36 +44,38 @@ func (gce *GCECloud) ListClusters(ctx context.Context) ([]string, error) { return allClusters, nil } -func (gce *GCECloud) GetManagedClusters(ctx context.Context) ([]*container.Cluster, error) { +// GetManagedClusters will return the cluster objects associated to this project +func (g *Cloud) GetManagedClusters(ctx context.Context) ([]*container.Cluster, error) { managedClusters := []*container.Cluster{} - if 
gce.regional { + if g.regional { var err error - managedClusters, err = gce.getClustersInLocation(gce.region) + managedClusters, err = g.getClustersInLocation(g.region) if err != nil { return nil, err } - } else if len(gce.managedZones) >= 1 { - for _, zone := range gce.managedZones { - clusters, err := gce.getClustersInLocation(zone) + } else if len(g.managedZones) >= 1 { + for _, zone := range g.managedZones { + clusters, err := g.getClustersInLocation(zone) if err != nil { return nil, err } managedClusters = append(managedClusters, clusters...) } } else { - return nil, errors.New(fmt.Sprintf("no zones associated with this cluster(%s)", gce.ProjectID())) + return nil, fmt.Errorf("no zones associated with this cluster(%s)", g.ProjectID()) } return managedClusters, nil } -func (gce *GCECloud) Master(ctx context.Context, clusterName string) (string, error) { +// Master returned the dns address of the master +func (g *Cloud) Master(ctx context.Context, clusterName string) (string, error) { return "k8s-" + clusterName + "-master.internal", nil } -func (gce *GCECloud) listClustersInZone(zone string) ([]string, error) { - clusters, err := gce.getClustersInLocation(zone) +func (g *Cloud) listClustersInZone(zone string) ([]string, error) { + clusters, err := g.getClustersInLocation(zone) if err != nil { return nil, err } @@ -85,17 +87,17 @@ func (gce *GCECloud) listClustersInZone(zone string) ([]string, error) { return result, nil } -func (gce *GCECloud) getClustersInLocation(zoneOrRegion string) ([]*container.Cluster, error) { +func (g *Cloud) getClustersInLocation(zoneOrRegion string) ([]*container.Cluster, error) { // TODO: Issue/68913 migrate metric to list_location instead of list_zone. mc := newClustersMetricContext("list_zone", zoneOrRegion) // TODO: use PageToken to list all not just the first 500 - location := getLocationName(gce.projectID, zoneOrRegion) - list, err := gce.containerService.Projects.Locations.Clusters.List(location).Do() + location := getLocationName(g.projectID, zoneOrRegion) + list, err := g.containerService.Projects.Locations.Clusters.List(location).Do() if err != nil { return nil, mc.Observe(err) } if list.Header.Get("nextPageToken") != "" { - glog.Errorf("Failed to get all clusters for request, received next page token %s", list.Header.Get("nextPageToken")) + klog.Errorf("Failed to get all clusters for request, received next page token %s", list.Header.Get("nextPageToken")) } return list.Clusters, mc.Observe(nil) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go index 3687b1e7f2814..7e10e50c32bb2 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go @@ -28,25 +28,28 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "github.com/golang/glog" - computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" 
"k8s.io/kubernetes/pkg/features" ) +// DiskType defines a specific type for holding disk types (eg. pd-ssd) type DiskType string const ( - DiskTypeSSD = "pd-ssd" + // DiskTypeSSD the type for persistent SSD storage + DiskTypeSSD = "pd-ssd" + + // DiskTypeStandard the type for standard persistent storage DiskTypeStandard = "pd-standard" diskTypeDefault = DiskTypeStandard @@ -85,7 +88,7 @@ type diskServiceManager interface { // Attach a persistent disk on GCE with the given disk spec to the specified instance. AttachDiskOnCloudProvider( - disk *GCEDisk, + disk *Disk, readWrite string, instanceZone string, instanceName string) error @@ -96,18 +99,18 @@ type diskServiceManager interface { instanceName string, devicePath string) error - ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) error - RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) error + ResizeDiskOnCloudProvider(disk *Disk, sizeGb int64, zone string) error + RegionalResizeDiskOnCloudProvider(disk *Disk, sizeGb int64) error // Gets the persistent disk from GCE with the given diskName. - GetDiskFromCloudProvider(zone string, diskName string) (*GCEDisk, error) + GetDiskFromCloudProvider(zone string, diskName string) (*Disk, error) // Gets the regional persistent disk from GCE with the given diskName. - GetRegionalDiskFromCloudProvider(diskName string) (*GCEDisk, error) + GetRegionalDiskFromCloudProvider(diskName string) (*Disk, error) } type gceServiceManager struct { - gce *GCECloud + gce *Cloud } var _ diskServiceManager = &gceServiceManager{} @@ -119,7 +122,7 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider( diskType string, zone string) error { diskTypeURI, err := manager.getDiskTypeURI( - manager.gce.region /* diskRegion */, singleZone{zone}, diskType, false /* useBetaAPI */) + manager.gce.region /* diskRegion */, singleZone{zone}, diskType) if err != nil { return err } @@ -148,17 +151,17 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider( } diskTypeURI, err := manager.getDiskTypeURI( - manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType, true /* useBetaAPI */) + manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType) if err != nil { return err } fullyQualifiedReplicaZones := []string{} for _, replicaZone := range replicaZones.UnsortedList() { fullyQualifiedReplicaZones = append( - fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true)) + fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone)) } - diskToCreateBeta := &computebeta.Disk{ + diskToCreate := &compute.Disk{ Name: name, SizeGb: sizeGb, Description: tagsStr, @@ -168,11 +171,11 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider( ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - return manager.gce.c.BetaRegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreateBeta) + return manager.gce.c.RegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreate) } func (manager *gceServiceManager) AttachDiskOnCloudProvider( - disk *GCEDisk, + disk *Disk, readWrite string, instanceZone string, instanceName string) error { @@ -205,13 +208,13 @@ func (manager *gceServiceManager) DetachDiskOnCloudProvider( func (manager *gceServiceManager) GetDiskFromCloudProvider( zone string, - diskName string) (*GCEDisk, error) { + diskName string) (*Disk, error) { if zone == "" { - return nil, fmt.Errorf("Can not fetch disk %q. 
Zone is empty.", diskName) + return nil, fmt.Errorf("can not fetch disk %q, zone is empty", diskName) } if diskName == "" { - return nil, fmt.Errorf("Can not fetch disk. Zone is specified (%q). But disk name is empty.", zone) + return nil, fmt.Errorf("can not fetch disk, zone is specified (%q), but disk name is empty", zone) } ctx, cancel := cloud.ContextWithCallTimeout() @@ -231,7 +234,7 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider( return nil, fmt.Errorf("failed to extract region from zone for %q/%q err=%v", zone, diskName, err) } - return &GCEDisk{ + return &Disk{ ZoneInfo: zoneInfo, Region: region, Name: diskStable.Name, @@ -242,7 +245,7 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider( } func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider( - diskName string) (*GCEDisk, error) { + diskName string) (*Disk, error) { if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk) @@ -250,7 +253,7 @@ func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider( ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - diskBeta, err := manager.gce.c.BetaRegionDisks().Get(ctx, meta.RegionalKey(diskName, manager.gce.region)) + diskBeta, err := manager.gce.c.RegionDisks().Get(ctx, meta.RegionalKey(diskName, manager.gce.region)) if err != nil { return nil, err } @@ -260,7 +263,7 @@ func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider( zones.Insert(lastComponent(zoneURI)) } - return &GCEDisk{ + return &Disk{ ZoneInfo: multiZone{zones}, Region: lastComponent(diskBeta.Region), Name: diskBeta.Name, @@ -287,10 +290,10 @@ func (manager *gceServiceManager) DeleteRegionalDiskOnCloudProvider( ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - return manager.gce.c.BetaRegionDisks().Delete(ctx, meta.RegionalKey(diskName, manager.gce.region)) + return manager.gce.c.RegionDisks().Delete(ctx, meta.RegionalKey(diskName, manager.gce.region)) } -func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error) { +func (manager *gceServiceManager) getDiskSourceURI(disk *Disk) (string, error) { getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint() switch zoneInfo := disk.ZoneInfo.(type) { @@ -325,14 +328,9 @@ func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error } func (manager *gceServiceManager) getDiskTypeURI( - diskRegion string, diskZoneInfo zoneType, diskType string, useBetaAPI bool) (string, error) { + diskRegion string, diskZoneInfo zoneType, diskType string) (string, error) { - var getProjectsAPIEndpoint string - if useBetaAPI { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpointBeta() - } else { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint() - } + getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint() switch zoneInfo := diskZoneInfo.(type) { case singleZone: @@ -361,15 +359,8 @@ func (manager *gceServiceManager) getDiskTypeURI( } } -func (manager *gceServiceManager) getReplicaZoneURI(zone string, useBetaAPI bool) string { - var getProjectsAPIEndpoint string - if useBetaAPI { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpointBeta() - } else { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint() - } - - return getProjectsAPIEndpoint + fmt.Sprintf( +func (manager *gceServiceManager) getReplicaZoneURI(zone string) string { + return manager.getProjectsAPIEndpoint() + fmt.Sprintf( 
replicaZoneURITemplateSingleZone, manager.gce.projectID, zone) @@ -402,14 +393,14 @@ func (manager *gceServiceManager) getRegionFromZone(zoneInfo zoneType) (string, region, err := GetGCERegion(zone) if err != nil { - glog.Warningf("failed to parse GCE region from zone %q: %v", zone, err) + klog.Warningf("failed to parse GCE region from zone %q: %v", zone, err) region = manager.gce.region } return region, nil } -func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) error { +func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *Disk, sizeGb int64, zone string) error { resizeServiceRequest := &compute.DisksResizeRequest{ SizeGb: sizeGb, } @@ -419,18 +410,18 @@ func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeG return manager.gce.c.Disks().Resize(ctx, meta.ZonalKey(disk.Name, zone), resizeServiceRequest) } -func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) error { +func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *Disk, sizeGb int64) error { if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk) } - resizeServiceRequest := &computebeta.RegionDisksResizeRequest{ + resizeServiceRequest := &compute.RegionDisksResizeRequest{ SizeGb: sizeGb, } ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - return manager.gce.c.BetaRegionDisks().Resize(ctx, meta.RegionalKey(disk.Name, disk.Region), resizeServiceRequest) + return manager.gce.c.RegionDisks().Resize(ctx, meta.RegionalKey(disk.Name, disk.Region), resizeServiceRequest) } // Disks is interface for manipulation with GCE PDs. @@ -472,13 +463,14 @@ type Disks interface { GetAutoLabelsForPD(name string, zone string) (map[string]string, error) } -// GCECloud implements Disks. -var _ Disks = (*GCECloud)(nil) +// Cloud implements Disks. +var _ Disks = (*Cloud)(nil) -// GCECloud implements PVLabeler. -var _ cloudprovider.PVLabeler = (*GCECloud)(nil) +// Cloud implements PVLabeler. 
+var _ cloudprovider.PVLabeler = (*Cloud)(nil) -type GCEDisk struct { +// Disk holds all relevant data about an instance of GCE storage +type Disk struct { ZoneInfo zoneType Region string Name string @@ -510,7 +502,8 @@ func newDiskMetricContextRegional(request, region string) *metricContext { return newGenericMetricContext("disk", request, region, unusedMetricLabel, computeV1Version) } -func (gce *GCECloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { +// GetLabelsForVolume retrieved the label info for the provided volume +func (g *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { // Ignore any volumes that are being provisioned if pv.Spec.GCEPersistentDisk.PDName == volume.ProvisionedVolumeName { return nil, nil @@ -519,7 +512,7 @@ func (gce *GCECloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo // If the zone is already labeled, honor the hint zone := pv.Labels[kubeletapis.LabelZoneFailureDomain] - labels, err := gce.GetAutoLabelsForPD(pv.Spec.GCEPersistentDisk.PDName, zone) + labels, err := g.GetAutoLabelsForPD(pv.Spec.GCEPersistentDisk.PDName, zone) if err != nil { return nil, err } @@ -527,28 +520,30 @@ func (gce *GCECloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo return labels, nil } -func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error { +// AttachDisk attaches given disk to the node with the specified NodeName. +// Current instance is used when instanceID is empty string. +func (g *Cloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error { instanceName := mapNodeNameToInstanceName(nodeName) - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { return fmt.Errorf("error getting instance %q", instanceName) } // Try fetching as regional PD - var disk *GCEDisk + var disk *Disk var mc *metricContext if regional && utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { - disk, err = gce.getRegionalDiskByName(diskName) + disk, err = g.getRegionalDiskByName(diskName) if err != nil { return err } - mc = newDiskMetricContextRegional("attach", gce.region) + mc = newDiskMetricContextRegional("attach", g.region) } else { - disk, err = gce.getDiskByName(diskName, instance.Zone) + disk, err = g.getDiskByName(diskName, instance.Zone) if err != nil { return err } - mc = newDiskMetricContextZonal("attach", gce.region, instance.Zone) + mc = newDiskMetricContextZonal("attach", g.region, instance.Zone) } readWrite := "READ_WRITE" @@ -556,16 +551,18 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn readWrite = "READ_ONLY" } - return mc.Observe(gce.manager.AttachDiskOnCloudProvider(disk, readWrite, instance.Zone, instance.Name)) + return mc.Observe(g.manager.AttachDiskOnCloudProvider(disk, readWrite, instance.Zone, instance.Name)) } -func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) error { +// DetachDisk detaches given disk to the node with the specified NodeName. +// Current instance is used when nodeName is empty string. 
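// A sketch, not in the diff, using the disk-attachment helpers of this file:
// check whether a PD is already attached to a node and attach it read-write if
// not. The AttachDisk and DiskIsAttached signatures are taken from this file;
// the helper name is an illustrative assumption.
package example

import (
	"k8s.io/apimachinery/pkg/types"
	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

func attachIfNeeded(g *gce.Cloud, diskName string, node types.NodeName) error {
	attached, err := g.DiskIsAttached(diskName, node)
	if err != nil {
		return err
	}
	if attached {
		return nil
	}
	// readOnly=false, regional=false: attach a zonal PD in READ_WRITE mode.
	return g.AttachDisk(diskName, node, false, false)
}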
+func (g *Cloud) DetachDisk(devicePath string, nodeName types.NodeName) error { instanceName := mapNodeNameToInstanceName(nodeName) - inst, err := gce.getInstanceByName(instanceName) + inst, err := g.getInstanceByName(instanceName) if err != nil { if err == cloudprovider.InstanceNotFound { // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( + klog.Warningf( "Instance %q does not exist. DetachDisk will assume PD %q is not attached to it.", instanceName, devicePath) @@ -575,17 +572,18 @@ func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) erro return fmt.Errorf("error getting instance %q", instanceName) } - mc := newDiskMetricContextZonal("detach", gce.region, inst.Zone) - return mc.Observe(gce.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath)) + mc := newDiskMetricContextZonal("detach", g.region, inst.Zone) + return mc.Observe(g.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath)) } -func (gce *GCECloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) { +// DiskIsAttached checks if a disk is attached to the node with the specified NodeName. +func (g *Cloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) { instanceName := mapNodeNameToInstanceName(nodeName) - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { if err == cloudprovider.InstanceNotFound { // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( + klog.Warningf( "Instance %q does not exist. DiskIsAttached will assume PD %q is not attached to it.", instanceName, diskName) @@ -605,17 +603,19 @@ func (gce *GCECloud) DiskIsAttached(diskName string, nodeName types.NodeName) (b return false, nil } -func (gce *GCECloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { +// DisksAreAttached is a batch function to check if a list of disks are attached +// to the node with the specified NodeName. +func (g *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { attached := make(map[string]bool) for _, diskName := range diskNames { attached[diskName] = false } instanceName := mapNodeNameToInstanceName(nodeName) - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { if err == cloudprovider.InstanceNotFound { // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( + klog.Warningf( "Instance %q does not exist. DisksAreAttached will assume PD %v are not attached to it.", instanceName, diskNames) @@ -640,11 +640,11 @@ func (gce *GCECloud) DisksAreAttached(diskNames []string, nodeName types.NodeNam // CreateDisk creates a new Persistent Disk, with the specified name & // size, in the specified zone. It stores specified tags encoded in // JSON in Description field. -func (gce *GCECloud) CreateDisk( +func (g *Cloud) CreateDisk( name string, diskType string, zone string, sizeGb int64, tags map[string]string) error { // Do not allow creation of PDs in zones that are do not have nodes. Such PDs // are not currently usable. 
- curZones, err := gce.GetAllCurrentZones() + curZones, err := g.GetAllCurrentZones() if err != nil { return err } @@ -652,7 +652,7 @@ func (gce *GCECloud) CreateDisk( return fmt.Errorf("kubernetes does not have a node in zone %q", zone) } - tagsStr, err := gce.encodeDiskTags(tags) + tagsStr, err := g.encodeDiskTags(tags) if err != nil { return err } @@ -662,14 +662,14 @@ func (gce *GCECloud) CreateDisk( return err } - mc := newDiskMetricContextZonal("create", gce.region, zone) + mc := newDiskMetricContextZonal("create", g.region, zone) - err = gce.manager.CreateDiskOnCloudProvider( + err = g.manager.CreateDiskOnCloudProvider( name, sizeGb, tagsStr, diskType, zone) mc.Observe(err) if isGCEError(err, "alreadyExists") { - glog.Warningf("GCE PD %q already exists, reusing", name) + klog.Warningf("GCE PD %q already exists, reusing", name) return nil } return err @@ -678,14 +678,14 @@ func (gce *GCECloud) CreateDisk( // CreateRegionalDisk creates a new Regional Persistent Disk, with the specified // name & size, replicated to the specified zones. It stores specified tags // encoded in JSON in Description field. -func (gce *GCECloud) CreateRegionalDisk( +func (g *Cloud) CreateRegionalDisk( name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) error { // Do not allow creation of PDs in zones that are do not have nodes. Such PDs // are not currently usable. This functionality should be reverted to checking // against managed zones if we want users to be able to create RegionalDisks // in zones where there are no nodes - curZones, err := gce.GetAllCurrentZones() + curZones, err := g.GetAllCurrentZones() if err != nil { return err } @@ -693,7 +693,7 @@ func (gce *GCECloud) CreateRegionalDisk( return fmt.Errorf("kubernetes does not have nodes in specified zones: %q. Zones that contain nodes: %q", replicaZones.Difference(curZones), curZones) } - tagsStr, err := gce.encodeDiskTags(tags) + tagsStr, err := g.encodeDiskTags(tags) if err != nil { return err } @@ -703,14 +703,14 @@ func (gce *GCECloud) CreateRegionalDisk( return err } - mc := newDiskMetricContextRegional("create", gce.region) + mc := newDiskMetricContextRegional("create", g.region) - err = gce.manager.CreateRegionalDiskOnCloudProvider( + err = g.manager.CreateRegionalDiskOnCloudProvider( name, sizeGb, tagsStr, diskType, replicaZones) mc.Observe(err) if isGCEError(err, "alreadyExists") { - glog.Warningf("GCE PD %q already exists, reusing", name) + klog.Warningf("GCE PD %q already exists, reusing", name) return nil } return err @@ -727,8 +727,9 @@ func getDiskType(diskType string) (string, error) { } } -func (gce *GCECloud) DeleteDisk(diskToDelete string) error { - err := gce.doDeleteDisk(diskToDelete) +// DeleteDisk deletes rgw referenced persistent disk. 
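// A sketch, not part of the diff, of the zonal PD lifecycle built from
// CreateDisk, ResizeDisk and DeleteDisk as declared in this file. The disk
// name, sizes and tags are illustrative values.
package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

func diskLifecycle(g *gce.Cloud, zone string) error {
	const name = "example-pd"
	// Create a 100 GB standard PD; the tags end up JSON-encoded in Description.
	if err := g.CreateDisk(name, gce.DiskTypeStandard, zone, 100, map[string]string{"created-by": "example"}); err != nil {
		return err
	}
	// Grow it to 200 GB.
	if _, err := g.ResizeDisk(name, resource.MustParse("100G"), resource.MustParse("200G")); err != nil {
		return err
	}
	return g.DeleteDisk(name)
}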
+func (g *Cloud) DeleteDisk(diskToDelete string) error { + err := g.doDeleteDisk(diskToDelete) if isGCEError(err, "resourceInUseByAnotherResource") { return volume.NewDeletedVolumeInUseError(err.Error()) } @@ -740,8 +741,8 @@ func (gce *GCECloud) DeleteDisk(diskToDelete string) error { } // ResizeDisk expands given disk and returns new disk size -func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) { - disk, err := gce.GetDiskByNameUnknownZone(diskToResize) +func (g *Cloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) { + disk, err := g.GetDiskByNameUnknownZone(diskToResize) if err != nil { return oldSize, err } @@ -760,26 +761,24 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, switch zoneInfo := disk.ZoneInfo.(type) { case singleZone: mc = newDiskMetricContextZonal("resize", disk.Region, zoneInfo.zone) - err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGIB, zoneInfo.zone) + err := g.manager.ResizeDiskOnCloudProvider(disk, requestGIB, zoneInfo.zone) if err != nil { return oldSize, mc.Observe(err) - } else { - return newSizeQuant, mc.Observe(err) } + return newSizeQuant, mc.Observe(err) case multiZone: if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { return oldSize, fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo) } mc = newDiskMetricContextRegional("resize", disk.Region) - err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGIB) + err := g.manager.RegionalResizeDiskOnCloudProvider(disk, requestGIB) if err != nil { return oldSize, mc.Observe(err) - } else { - return newSizeQuant, mc.Observe(err) } + return newSizeQuant, mc.Observe(err) case nil: return oldSize, fmt.Errorf("PD has nil ZoneInfo: %v", disk) default: @@ -787,13 +786,13 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, } } -// Builds the labels that should be automatically added to a PersistentVolume backed by a GCE PD +// GetAutoLabelsForPD builds the labels that should be automatically added to a PersistentVolume backed by a GCE PD // Specifically, this builds FailureDomain (zone) and Region labels. // The PersistentVolumeLabel admission controller calls this and adds the labels when a PV is created. // If zone is specified, the volume will only be found in the specified zone, // otherwise all managed zones will be searched. -func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) { - var disk *GCEDisk +func (g *Cloud) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) { + var disk *Disk var err error if zone == "" { // For regional PDs this is fine, but for zonal PDs we would like as far @@ -804,7 +803,7 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st // However, wherever possible the zone should be passed (and it is // passed for most cases that we can control, e.g. dynamic volume // provisioning). - disk, err = gce.GetDiskByNameUnknownZone(name) + disk, err = g.GetDiskByNameUnknownZone(name) if err != nil { return nil, err } @@ -815,24 +814,24 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { zoneSet, err := volumeutil.LabelZonesToSet(zone) if err != nil { - glog.Warningf("Failed to parse zone field: %q. 
Will use raw field.", zone) + klog.Warningf("Failed to parse zone field: %q. Will use raw field.", zone) } if len(zoneSet) > 1 { // Regional PD - disk, err = gce.getRegionalDiskByName(name) + disk, err = g.getRegionalDiskByName(name) if err != nil { return nil, err } } else { // Zonal PD - disk, err = gce.getDiskByName(name, zone) + disk, err = g.getDiskByName(name, zone) if err != nil { return nil, err } } } else { - disk, err = gce.getDiskByName(name, zone) + disk, err = g.getDiskByName(name, zone) if err != nil { return nil, err } @@ -867,11 +866,11 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st return labels, nil } -// Returns a GCEDisk for the disk, if it is found in the specified zone. +// Returns a Disk for the disk, if it is found in the specified zone. // If not found, returns (nil, nil) -func (gce *GCECloud) findDiskByName(diskName string, zone string) (*GCEDisk, error) { - mc := newDiskMetricContextZonal("get", gce.region, zone) - disk, err := gce.manager.GetDiskFromCloudProvider(zone, diskName) +func (g *Cloud) findDiskByName(diskName string, zone string) (*Disk, error) { + mc := newDiskMetricContextZonal("get", g.region, zone) + disk, err := g.manager.GetDiskFromCloudProvider(zone, diskName) if err == nil { return disk, mc.Observe(nil) } @@ -882,19 +881,19 @@ func (gce *GCECloud) findDiskByName(diskName string, zone string) (*GCEDisk, err } // Like findDiskByName, but returns an error if the disk is not found -func (gce *GCECloud) getDiskByName(diskName string, zone string) (*GCEDisk, error) { - disk, err := gce.findDiskByName(diskName, zone) +func (g *Cloud) getDiskByName(diskName string, zone string) (*Disk, error) { + disk, err := g.findDiskByName(diskName, zone) if disk == nil && err == nil { return nil, fmt.Errorf("GCE persistent disk not found: diskName=%q zone=%q", diskName, zone) } return disk, err } -// Returns a GCEDisk for the regional disk, if it is found. +// Returns a Disk for the regional disk, if it is found. 
// If not found, returns (nil, nil) -func (gce *GCECloud) findRegionalDiskByName(diskName string) (*GCEDisk, error) { - mc := newDiskMetricContextRegional("get", gce.region) - disk, err := gce.manager.GetRegionalDiskFromCloudProvider(diskName) +func (g *Cloud) findRegionalDiskByName(diskName string) (*Disk, error) { + mc := newDiskMetricContextRegional("get", g.region) + disk, err := g.manager.GetRegionalDiskFromCloudProvider(diskName) if err == nil { return disk, mc.Observe(nil) } @@ -905,20 +904,20 @@ func (gce *GCECloud) findRegionalDiskByName(diskName string) (*GCEDisk, error) { } // Like findRegionalDiskByName, but returns an error if the disk is not found -func (gce *GCECloud) getRegionalDiskByName(diskName string) (*GCEDisk, error) { - disk, err := gce.findRegionalDiskByName(diskName) +func (g *Cloud) getRegionalDiskByName(diskName string) (*Disk, error) { + disk, err := g.findRegionalDiskByName(diskName) if disk == nil && err == nil { return nil, fmt.Errorf("GCE regional persistent disk not found: diskName=%q", diskName) } return disk, err } -// Scans all managed zones to return the GCE PD +// GetDiskByNameUnknownZone scans all managed zones to return the GCE PD // Prefer getDiskByName, if the zone can be established // Return cloudprovider.DiskNotFound if the given disk cannot be found in any zone -func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) { +func (g *Cloud) GetDiskByNameUnknownZone(diskName string) (*Disk, error) { if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { - regionalDisk, err := gce.getRegionalDiskByName(diskName) + regionalDisk, err := g.getRegionalDiskByName(diskName) if err == nil { return regionalDisk, err } @@ -934,9 +933,9 @@ func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) // admission control, but that might be a little weird (values changing // on create) - var found *GCEDisk - for _, zone := range gce.managedZones { - disk, err := gce.findDiskByName(diskName, zone) + var found *Disk + for _, zone := range g.managedZones { + disk, err := g.findDiskByName(diskName, zone) if err != nil { return nil, err } @@ -949,7 +948,7 @@ func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) switch zoneInfo := disk.ZoneInfo.(type) { case multiZone: if zoneInfo.replicaZones.Has(zone) { - glog.Warningf("GCE PD name (%q) was found in multiple zones (%q), but ok because it is a RegionalDisk.", + klog.Warningf("GCE PD name (%q) was found in multiple zones (%q), but ok because it is a RegionalDisk.", diskName, zoneInfo.replicaZones) continue } @@ -963,15 +962,15 @@ func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) if found != nil { return found, nil } - glog.Warningf("GCE persistent disk %q not found in managed zones (%s)", - diskName, strings.Join(gce.managedZones, ",")) + klog.Warningf("GCE persistent disk %q not found in managed zones (%s)", + diskName, strings.Join(g.managedZones, ",")) return nil, cloudprovider.DiskNotFound } // encodeDiskTags encodes requested volume tags into JSON string, as GCE does // not support tags on GCE PDs and we use Description field as fallback. 
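// encodeDiskTags itself is unexported, so the following standalone sketch (not
// from the diff) simply restates the behaviour described above: tags are
// marshalled to JSON and carried in the PD's Description field because GCE PDs
// have no native tag support. An empty tag map becomes an empty string.
package example

import "encoding/json"

func tagsToDescription(tags map[string]string) (string, error) {
	if len(tags) == 0 {
		// No tags -> empty description rather than "{}".
		return "", nil
	}
	enc, err := json.Marshal(tags)
	if err != nil {
		return "", err
	}
	return string(enc), nil
}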
-func (gce *GCECloud) encodeDiskTags(tags map[string]string) (string, error) { +func (g *Cloud) encodeDiskTags(tags map[string]string) (string, error) { if len(tags) == 0 { // No tags -> empty JSON return "", nil @@ -984,8 +983,8 @@ func (gce *GCECloud) encodeDiskTags(tags map[string]string) (string, error) { return string(enc), nil } -func (gce *GCECloud) doDeleteDisk(diskToDelete string) error { - disk, err := gce.GetDiskByNameUnknownZone(diskToDelete) +func (g *Cloud) doDeleteDisk(diskToDelete string) error { + disk, err := g.GetDiskByNameUnknownZone(diskToDelete) if err != nil { return err } @@ -995,14 +994,14 @@ func (gce *GCECloud) doDeleteDisk(diskToDelete string) error { switch zoneInfo := disk.ZoneInfo.(type) { case singleZone: mc = newDiskMetricContextZonal("delete", disk.Region, zoneInfo.zone) - return mc.Observe(gce.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name)) + return mc.Observe(g.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name)) case multiZone: if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { return fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo) } mc = newDiskMetricContextRegional("delete", disk.Region) - return mc.Observe(gce.manager.DeleteRegionalDiskOnCloudProvider(disk.Name)) + return mc.Observe(g.manager.DeleteRegionalDiskOnCloudProvider(disk.Name)) case nil: return fmt.Errorf("PD has nil ZoneInfo: %v", disk) default: diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_fake.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_fake.go new file mode 100644 index 0000000000000..73a724d74ae01 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_fake.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gce + +import ( + "fmt" + "net/http" + + compute "google.golang.org/api/compute/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" +) + +// TestClusterValues holds the config values for the fake/test gce cloud object. +type TestClusterValues struct { + ProjectID string + Region string + ZoneName string + SecondaryZoneName string + ClusterID string + ClusterName string +} + +// DefaultTestClusterValues Creates a reasonable set of default cluster values +// for generating a new test fake GCE cloud instance. 
+func DefaultTestClusterValues() TestClusterValues { + return TestClusterValues{ + ProjectID: "test-project", + Region: "us-central1", + ZoneName: "us-central1-b", + SecondaryZoneName: "us-central1-c", + ClusterID: "test-cluster-id", + ClusterName: "Test Cluster Name", + } +} + +type fakeRoundTripper struct{} + +func (*fakeRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { + return nil, fmt.Errorf("err: test used fake http client") +} + +// Stubs ClusterID so that ClusterID.getOrInitialize() does not require calling +// gce.Initialize() +func fakeClusterID(clusterID string) ClusterID { + return ClusterID{ + clusterID: &clusterID, + store: cache.NewStore(func(obj interface{}) (string, error) { + return "", nil + }), + } +} + +// NewFakeGCECloud constructs a fake GCE Cloud from the cluster values. +func NewFakeGCECloud(vals TestClusterValues) *Cloud { + client := &http.Client{Transport: &fakeRoundTripper{}} + service, _ := compute.New(client) + gce := &Cloud{ + region: vals.Region, + service: service, + managedZones: []string{vals.ZoneName}, + projectID: vals.ProjectID, + networkProjectID: vals.ProjectID, + ClusterID: fakeClusterID(vals.ClusterID), + } + c := cloud.NewMockGCE(&gceProjectRouter{gce}) + gce.c = c + return gce +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go index e138df87471b8..4aea497095622 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go @@ -28,38 +28,38 @@ func newFirewallMetricContext(request string) *metricContext { } // GetFirewall returns the Firewall by name. -func (gce *GCECloud) GetFirewall(name string) (*compute.Firewall, error) { +func (g *Cloud) GetFirewall(name string) (*compute.Firewall, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newFirewallMetricContext("get") - v, err := gce.c.Firewalls().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.Firewalls().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // CreateFirewall creates the passed firewall -func (gce *GCECloud) CreateFirewall(f *compute.Firewall) error { +func (g *Cloud) CreateFirewall(f *compute.Firewall) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newFirewallMetricContext("create") - return mc.Observe(gce.c.Firewalls().Insert(ctx, meta.GlobalKey(f.Name), f)) + return mc.Observe(g.c.Firewalls().Insert(ctx, meta.GlobalKey(f.Name), f)) } // DeleteFirewall deletes the given firewall rule. -func (gce *GCECloud) DeleteFirewall(name string) error { +func (g *Cloud) DeleteFirewall(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newFirewallMetricContext("delete") - return mc.Observe(gce.c.Firewalls().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.Firewalls().Delete(ctx, meta.GlobalKey(name))) } // UpdateFirewall applies the given firewall as an update to an existing service. 
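The gce_fake.go file added above provides DefaultTestClusterValues and NewFakeGCECloud so that a Cloud can be constructed without touching real GCP. A rough sketch of how a unit test in this package might consume it (the test name and the field check are illustrative assumptions, not part of the vendored sources):

package gce

import "testing"

// TestFakeCloudSketch is a hypothetical example, not part of the vendored code.
func TestFakeCloudSketch(t *testing.T) {
	vals := DefaultTestClusterValues()
	vals.Region = "europe-west1" // any of the defaults can be overridden before construction

	gce := NewFakeGCECloud(vals)
	if gce.region != vals.Region {
		t.Fatalf("expected region %q, got %q", vals.Region, gce.region)
	}
	// No real API calls are possible: the http.Client uses fakeRoundTripper and
	// gce.c is backed by cloud.NewMockGCE, so tests stay entirely in memory.
}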
-func (gce *GCECloud) UpdateFirewall(f *compute.Firewall) error { +func (g *Cloud) UpdateFirewall(f *compute.Firewall) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newFirewallMetricContext("update") - return mc.Observe(gce.c.Firewalls().Update(ctx, meta.GlobalKey(f.Name), f)) + return mc.Observe(g.c.Firewalls().Update(ctx, meta.GlobalKey(f.Name), f)) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go index b40652c98e32b..5689cfd32eaa1 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go @@ -32,129 +32,129 @@ func newForwardingRuleMetricContextWithVersion(request, region, version string) } // CreateGlobalForwardingRule creates the passed GlobalForwardingRule -func (gce *GCECloud) CreateGlobalForwardingRule(rule *compute.ForwardingRule) error { +func (g *Cloud) CreateGlobalForwardingRule(rule *compute.ForwardingRule) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("create", "") - return mc.Observe(gce.c.GlobalForwardingRules().Insert(ctx, meta.GlobalKey(rule.Name), rule)) + return mc.Observe(g.c.GlobalForwardingRules().Insert(ctx, meta.GlobalKey(rule.Name), rule)) } // SetProxyForGlobalForwardingRule links the given TargetHttp(s)Proxy with the given GlobalForwardingRule. // targetProxyLink is the SelfLink of a TargetHttp(s)Proxy. -func (gce *GCECloud) SetProxyForGlobalForwardingRule(forwardingRuleName, targetProxyLink string) error { +func (g *Cloud) SetProxyForGlobalForwardingRule(forwardingRuleName, targetProxyLink string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("set_proxy", "") target := &compute.TargetReference{Target: targetProxyLink} - return mc.Observe(gce.c.GlobalForwardingRules().SetTarget(ctx, meta.GlobalKey(forwardingRuleName), target)) + return mc.Observe(g.c.GlobalForwardingRules().SetTarget(ctx, meta.GlobalKey(forwardingRuleName), target)) } // DeleteGlobalForwardingRule deletes the GlobalForwardingRule by name. -func (gce *GCECloud) DeleteGlobalForwardingRule(name string) error { +func (g *Cloud) DeleteGlobalForwardingRule(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("delete", "") - return mc.Observe(gce.c.GlobalForwardingRules().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.GlobalForwardingRules().Delete(ctx, meta.GlobalKey(name))) } // GetGlobalForwardingRule returns the GlobalForwardingRule by name. -func (gce *GCECloud) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) { +func (g *Cloud) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("get", "") - v, err := gce.c.GlobalForwardingRules().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.GlobalForwardingRules().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // ListGlobalForwardingRules lists all GlobalForwardingRules in the project. 
-func (gce *GCECloud) ListGlobalForwardingRules() ([]*compute.ForwardingRule, error) { +func (g *Cloud) ListGlobalForwardingRules() ([]*compute.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("list", "") - v, err := gce.c.GlobalForwardingRules().List(ctx, filter.None) + v, err := g.c.GlobalForwardingRules().List(ctx, filter.None) return v, mc.Observe(err) } // GetRegionForwardingRule returns the RegionalForwardingRule by name & region. -func (gce *GCECloud) GetRegionForwardingRule(name, region string) (*compute.ForwardingRule, error) { +func (g *Cloud) GetRegionForwardingRule(name, region string) (*compute.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("get", region) - v, err := gce.c.ForwardingRules().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.ForwardingRules().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // GetAlphaRegionForwardingRule returns the Alpha forwarding rule by name & region. -func (gce *GCECloud) GetAlphaRegionForwardingRule(name, region string) (*computealpha.ForwardingRule, error) { +func (g *Cloud) GetAlphaRegionForwardingRule(name, region string) (*computealpha.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContextWithVersion("get", region, computeAlphaVersion) - v, err := gce.c.AlphaForwardingRules().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.AlphaForwardingRules().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // ListRegionForwardingRules lists all RegionalForwardingRules in the project & region. -func (gce *GCECloud) ListRegionForwardingRules(region string) ([]*compute.ForwardingRule, error) { +func (g *Cloud) ListRegionForwardingRules(region string) ([]*compute.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("list", region) - v, err := gce.c.ForwardingRules().List(ctx, region, filter.None) + v, err := g.c.ForwardingRules().List(ctx, region, filter.None) return v, mc.Observe(err) } // ListAlphaRegionForwardingRules lists all RegionalForwardingRules in the project & region. 
-func (gce *GCECloud) ListAlphaRegionForwardingRules(region string) ([]*computealpha.ForwardingRule, error) { +func (g *Cloud) ListAlphaRegionForwardingRules(region string) ([]*computealpha.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContextWithVersion("list", region, computeAlphaVersion) - v, err := gce.c.AlphaForwardingRules().List(ctx, region, filter.None) + v, err := g.c.AlphaForwardingRules().List(ctx, region, filter.None) return v, mc.Observe(err) } // CreateRegionForwardingRule creates and returns a // RegionalForwardingRule that points to the given BackendService -func (gce *GCECloud) CreateRegionForwardingRule(rule *compute.ForwardingRule, region string) error { +func (g *Cloud) CreateRegionForwardingRule(rule *compute.ForwardingRule, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("create", region) - return mc.Observe(gce.c.ForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule)) + return mc.Observe(g.c.ForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule)) } // CreateAlphaRegionForwardingRule creates and returns an Alpha // forwarding rule in the given region. -func (gce *GCECloud) CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRule, region string) error { +func (g *Cloud) CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRule, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContextWithVersion("create", region, computeAlphaVersion) - return mc.Observe(gce.c.AlphaForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule)) + return mc.Observe(g.c.AlphaForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule)) } // DeleteRegionForwardingRule deletes the RegionalForwardingRule by name & region. -func (gce *GCECloud) DeleteRegionForwardingRule(name, region string) error { +func (g *Cloud) DeleteRegionForwardingRule(name, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("delete", region) - return mc.Observe(gce.c.ForwardingRules().Delete(ctx, meta.RegionalKey(name, region))) + return mc.Observe(g.c.ForwardingRules().Delete(ctx, meta.RegionalKey(name, region))) } // TODO(#51665): retire this function once Network Tiers becomes Beta in GCP. -func (gce *GCECloud) getNetworkTierFromForwardingRule(name, region string) (string, error) { - if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { +func (g *Cloud) getNetworkTierFromForwardingRule(name, region string) (string, error) { + if !g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { return cloud.NetworkTierDefault.ToGCEValue(), nil } - fwdRule, err := gce.GetAlphaRegionForwardingRule(name, region) + fwdRule, err := g.GetAlphaRegionForwardingRule(name, region) if err != nil { return handleAlphaNetworkTierGetError(err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go index 8dc913a6089dc..d314376db1214 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go @@ -17,18 +17,18 @@ limitations under the License.
package gce import ( - "github.com/golang/glog" + "k8s.io/klog" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" "k8s.io/kubernetes/pkg/master/ports" - utilversion "k8s.io/kubernetes/pkg/util/version" ) const ( @@ -42,7 +42,7 @@ var ( func init() { if v, err := utilversion.ParseGeneric("1.7.2"); err != nil { - glog.Fatalf("Failed to parse version for minNodesHealthCheckVersion: %v", err) + klog.Fatalf("Failed to parse version for minNodesHealthCheckVersion: %v", err) } else { minNodesHealthCheckVersion = v } @@ -56,204 +56,204 @@ func newHealthcheckMetricContextWithVersion(request, version string) *metricCont return newGenericMetricContext("healthcheck", request, unusedMetricLabel, unusedMetricLabel, version) } -// GetHttpHealthCheck returns the given HttpHealthCheck by name. -func (gce *GCECloud) GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error) { +// GetHTTPHealthCheck returns the given HttpHealthCheck by name. +func (g *Cloud) GetHTTPHealthCheck(name string) (*compute.HttpHealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("get_legacy") - v, err := gce.c.HttpHealthChecks().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.HttpHealthChecks().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } -// UpdateHttpHealthCheck applies the given HttpHealthCheck as an update. -func (gce *GCECloud) UpdateHttpHealthCheck(hc *compute.HttpHealthCheck) error { +// UpdateHTTPHealthCheck applies the given HttpHealthCheck as an update. +func (g *Cloud) UpdateHTTPHealthCheck(hc *compute.HttpHealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("update_legacy") - return mc.Observe(gce.c.HttpHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HttpHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) } -// DeleteHttpHealthCheck deletes the given HttpHealthCheck by name. -func (gce *GCECloud) DeleteHttpHealthCheck(name string) error { +// DeleteHTTPHealthCheck deletes the given HttpHealthCheck by name. +func (g *Cloud) DeleteHTTPHealthCheck(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("delete_legacy") - return mc.Observe(gce.c.HttpHealthChecks().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.HttpHealthChecks().Delete(ctx, meta.GlobalKey(name))) } -// CreateHttpHealthCheck creates the given HttpHealthCheck. -func (gce *GCECloud) CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error { +// CreateHTTPHealthCheck creates the given HttpHealthCheck. +func (g *Cloud) CreateHTTPHealthCheck(hc *compute.HttpHealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("create_legacy") - return mc.Observe(gce.c.HttpHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HttpHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) } -// ListHttpHealthChecks lists all HttpHealthChecks in the project. 
-func (gce *GCECloud) ListHttpHealthChecks() ([]*compute.HttpHealthCheck, error) { +// ListHTTPHealthChecks lists all HttpHealthChecks in the project. +func (g *Cloud) ListHTTPHealthChecks() ([]*compute.HttpHealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("list_legacy") - v, err := gce.c.HttpHealthChecks().List(ctx, filter.None) + v, err := g.c.HttpHealthChecks().List(ctx, filter.None) return v, mc.Observe(err) } // Legacy HTTPS Health Checks -// GetHttpsHealthCheck returns the given HttpsHealthCheck by name. -func (gce *GCECloud) GetHttpsHealthCheck(name string) (*compute.HttpsHealthCheck, error) { +// GetHTTPSHealthCheck returns the given HttpsHealthCheck by name. +func (g *Cloud) GetHTTPSHealthCheck(name string) (*compute.HttpsHealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("get_legacy") - v, err := gce.c.HttpsHealthChecks().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.HttpsHealthChecks().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } -// UpdateHttpsHealthCheck applies the given HttpsHealthCheck as an update. -func (gce *GCECloud) UpdateHttpsHealthCheck(hc *compute.HttpsHealthCheck) error { +// UpdateHTTPSHealthCheck applies the given HttpsHealthCheck as an update. +func (g *Cloud) UpdateHTTPSHealthCheck(hc *compute.HttpsHealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("update_legacy") - return mc.Observe(gce.c.HttpsHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HttpsHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) } -// DeleteHttpsHealthCheck deletes the given HttpsHealthCheck by name. -func (gce *GCECloud) DeleteHttpsHealthCheck(name string) error { +// DeleteHTTPSHealthCheck deletes the given HttpsHealthCheck by name. +func (g *Cloud) DeleteHTTPSHealthCheck(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("delete_legacy") - return mc.Observe(gce.c.HttpsHealthChecks().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.HttpsHealthChecks().Delete(ctx, meta.GlobalKey(name))) } -// CreateHttpsHealthCheck creates the given HttpsHealthCheck. -func (gce *GCECloud) CreateHttpsHealthCheck(hc *compute.HttpsHealthCheck) error { +// CreateHTTPSHealthCheck creates the given HttpsHealthCheck. +func (g *Cloud) CreateHTTPSHealthCheck(hc *compute.HttpsHealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("create_legacy") - return mc.Observe(gce.c.HttpsHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HttpsHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) } -// ListHttpsHealthChecks lists all HttpsHealthChecks in the project. -func (gce *GCECloud) ListHttpsHealthChecks() ([]*compute.HttpsHealthCheck, error) { +// ListHTTPSHealthChecks lists all HttpsHealthChecks in the project. +func (g *Cloud) ListHTTPSHealthChecks() ([]*compute.HttpsHealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("list_legacy") - v, err := gce.c.HttpsHealthChecks().List(ctx, filter.None) + v, err := g.c.HttpsHealthChecks().List(ctx, filter.None) return v, mc.Observe(err) } // Generic HealthCheck // GetHealthCheck returns the given HealthCheck by name. 
-func (gce *GCECloud) GetHealthCheck(name string) (*compute.HealthCheck, error) { +func (g *Cloud) GetHealthCheck(name string) (*compute.HealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("get") - v, err := gce.c.HealthChecks().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.HealthChecks().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // GetAlphaHealthCheck returns the given alpha HealthCheck by name. -func (gce *GCECloud) GetAlphaHealthCheck(name string) (*computealpha.HealthCheck, error) { +func (g *Cloud) GetAlphaHealthCheck(name string) (*computealpha.HealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("get", computeAlphaVersion) - v, err := gce.c.AlphaHealthChecks().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.AlphaHealthChecks().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // GetBetaHealthCheck returns the given beta HealthCheck by name. -func (gce *GCECloud) GetBetaHealthCheck(name string) (*computebeta.HealthCheck, error) { +func (g *Cloud) GetBetaHealthCheck(name string) (*computebeta.HealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("get", computeBetaVersion) - v, err := gce.c.BetaHealthChecks().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.BetaHealthChecks().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // UpdateHealthCheck applies the given HealthCheck as an update. -func (gce *GCECloud) UpdateHealthCheck(hc *compute.HealthCheck) error { +func (g *Cloud) UpdateHealthCheck(hc *compute.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("update") - return mc.Observe(gce.c.HealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) } // UpdateAlphaHealthCheck applies the given alpha HealthCheck as an update. -func (gce *GCECloud) UpdateAlphaHealthCheck(hc *computealpha.HealthCheck) error { +func (g *Cloud) UpdateAlphaHealthCheck(hc *computealpha.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("update", computeAlphaVersion) - return mc.Observe(gce.c.AlphaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.AlphaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) } // UpdateBetaHealthCheck applies the given beta HealthCheck as an update. -func (gce *GCECloud) UpdateBetaHealthCheck(hc *computebeta.HealthCheck) error { +func (g *Cloud) UpdateBetaHealthCheck(hc *computebeta.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("update", computeBetaVersion) - return mc.Observe(gce.c.BetaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.BetaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) } // DeleteHealthCheck deletes the given HealthCheck by name. -func (gce *GCECloud) DeleteHealthCheck(name string) error { +func (g *Cloud) DeleteHealthCheck(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("delete") - return mc.Observe(gce.c.HealthChecks().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.HealthChecks().Delete(ctx, meta.GlobalKey(name))) } // CreateHealthCheck creates the given HealthCheck. 
-func (gce *GCECloud) CreateHealthCheck(hc *compute.HealthCheck) error { +func (g *Cloud) CreateHealthCheck(hc *compute.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("create") - return mc.Observe(gce.c.HealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) } // CreateAlphaHealthCheck creates the given alpha HealthCheck. -func (gce *GCECloud) CreateAlphaHealthCheck(hc *computealpha.HealthCheck) error { +func (g *Cloud) CreateAlphaHealthCheck(hc *computealpha.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("create", computeAlphaVersion) - return mc.Observe(gce.c.AlphaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.AlphaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) } // CreateBetaHealthCheck creates the given beta HealthCheck. -func (gce *GCECloud) CreateBetaHealthCheck(hc *computebeta.HealthCheck) error { +func (g *Cloud) CreateBetaHealthCheck(hc *computebeta.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("create", computeBetaVersion) - return mc.Observe(gce.c.BetaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.BetaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) } // ListHealthChecks lists all HealthCheck in the project. -func (gce *GCECloud) ListHealthChecks() ([]*compute.HealthCheck, error) { +func (g *Cloud) ListHealthChecks() ([]*compute.HealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("list") - v, err := gce.c.HealthChecks().List(ctx, filter.None) + v, err := g.c.HealthChecks().List(ctx, filter.None) return v, mc.Observe(err) } @@ -274,7 +274,7 @@ func GetNodesHealthCheckPath() string { func isAtLeastMinNodesHealthCheckVersion(vstring string) bool { version, err := utilversion.ParseGeneric(vstring) if err != nil { - glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) + klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) return false } return version.AtLeast(minNodesHealthCheckVersion) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go index 13b2c51e503bf..edc5f093339bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go @@ -30,49 +30,49 @@ func newInstanceGroupMetricContext(request string, zone string) *metricContext { // CreateInstanceGroup creates an instance group with the given // instances. It is the callers responsibility to add named ports. -func (gce *GCECloud) CreateInstanceGroup(ig *compute.InstanceGroup, zone string) error { +func (g *Cloud) CreateInstanceGroup(ig *compute.InstanceGroup, zone string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("create", zone) - return mc.Observe(gce.c.InstanceGroups().Insert(ctx, meta.ZonalKey(ig.Name, zone), ig)) + return mc.Observe(g.c.InstanceGroups().Insert(ctx, meta.ZonalKey(ig.Name, zone), ig)) } // DeleteInstanceGroup deletes an instance group. 
-func (gce *GCECloud) DeleteInstanceGroup(name string, zone string) error { +func (g *Cloud) DeleteInstanceGroup(name string, zone string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("delete", zone) - return mc.Observe(gce.c.InstanceGroups().Delete(ctx, meta.ZonalKey(name, zone))) + return mc.Observe(g.c.InstanceGroups().Delete(ctx, meta.ZonalKey(name, zone))) } // ListInstanceGroups lists all InstanceGroups in the project and // zone. -func (gce *GCECloud) ListInstanceGroups(zone string) ([]*compute.InstanceGroup, error) { +func (g *Cloud) ListInstanceGroups(zone string) ([]*compute.InstanceGroup, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("list", zone) - v, err := gce.c.InstanceGroups().List(ctx, zone, filter.None) + v, err := g.c.InstanceGroups().List(ctx, zone, filter.None) return v, mc.Observe(err) } // ListInstancesInInstanceGroup lists all the instances in a given // instance group and state. -func (gce *GCECloud) ListInstancesInInstanceGroup(name string, zone string, state string) ([]*compute.InstanceWithNamedPorts, error) { +func (g *Cloud) ListInstancesInInstanceGroup(name string, zone string, state string) ([]*compute.InstanceWithNamedPorts, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("list_instances", zone) req := &compute.InstanceGroupsListInstancesRequest{InstanceState: state} - v, err := gce.c.InstanceGroups().ListInstances(ctx, meta.ZonalKey(name, zone), req, filter.None) + v, err := g.c.InstanceGroups().ListInstances(ctx, meta.ZonalKey(name, zone), req, filter.None) return v, mc.Observe(err) } // AddInstancesToInstanceGroup adds the given instances to the given // instance group. -func (gce *GCECloud) AddInstancesToInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error { +func (g *Cloud) AddInstancesToInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -84,12 +84,12 @@ func (gce *GCECloud) AddInstancesToInstanceGroup(name string, zone string, insta req := &compute.InstanceGroupsAddInstancesRequest{ Instances: instanceRefs, } - return mc.Observe(gce.c.InstanceGroups().AddInstances(ctx, meta.ZonalKey(name, zone), req)) + return mc.Observe(g.c.InstanceGroups().AddInstances(ctx, meta.ZonalKey(name, zone), req)) } // RemoveInstancesFromInstanceGroup removes the given instances from // the instance group. 
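CreateInstanceGroup above deliberately leaves named ports to the caller, and SetNamedPortsOfInstanceGroup, just below, is the call that adds them. A minimal sketch of that two-step flow, with an invented helper and placeholder names (nothing here is part of the vendored code):

package gce

import (
	compute "google.golang.org/api/compute/v1"
)

// ensureExampleInstanceGroup is a hypothetical caller: it creates the group first
// and then adds the named ports itself, as CreateInstanceGroup's comment requires.
func ensureExampleInstanceGroup(g *Cloud, zone string) error {
	ig := &compute.InstanceGroup{Name: "example-instance-group"} // placeholder name
	if err := g.CreateInstanceGroup(ig, zone); err != nil {
		return err
	}
	namedPorts := []*compute.NamedPort{{Name: "http", Port: 80}}
	return g.SetNamedPortsOfInstanceGroup(ig.Name, zone, namedPorts)
}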
-func (gce *GCECloud) RemoveInstancesFromInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error { +func (g *Cloud) RemoveInstancesFromInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -101,25 +101,25 @@ func (gce *GCECloud) RemoveInstancesFromInstanceGroup(name string, zone string, req := &compute.InstanceGroupsRemoveInstancesRequest{ Instances: instanceRefs, } - return mc.Observe(gce.c.InstanceGroups().RemoveInstances(ctx, meta.ZonalKey(name, zone), req)) + return mc.Observe(g.c.InstanceGroups().RemoveInstances(ctx, meta.ZonalKey(name, zone), req)) } // SetNamedPortsOfInstanceGroup sets the list of named ports on a given instance group -func (gce *GCECloud) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error { +func (g *Cloud) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("set_namedports", zone) req := &compute.InstanceGroupsSetNamedPortsRequest{NamedPorts: namedPorts} - return mc.Observe(gce.c.InstanceGroups().SetNamedPorts(ctx, meta.ZonalKey(igName, zone), req)) + return mc.Observe(g.c.InstanceGroups().SetNamedPorts(ctx, meta.ZonalKey(igName, zone), req)) } // GetInstanceGroup returns an instance group by name. -func (gce *GCECloud) GetInstanceGroup(name string, zone string) (*compute.InstanceGroup, error) { +func (g *Cloud) GetInstanceGroup(name string, zone string) (*compute.InstanceGroup, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("get", zone) - v, err := gce.c.InstanceGroups().Get(ctx, meta.ZonalKey(name, zone)) + v, err := g.c.InstanceGroups().Get(ctx, meta.ZonalKey(name, zone)) return v, mc.Observe(err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go index 4e791accd420e..e8345abfae564 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go @@ -25,15 +25,15 @@ import ( "time" "cloud.google.com/go/compute/metadata" - "github.com/golang/glog" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" @@ -67,22 +67,22 @@ func getZone(n *v1.Node) string { return zone } -func makeHostURL(projectsApiEndpoint, projectID, zone, host string) string { +func makeHostURL(projectsAPIEndpoint, projectID, zone, host string) string { host = canonicalizeInstanceName(host) - return projectsApiEndpoint + strings.Join([]string{projectID, "zones", zone, "instances", host}, "/") + return projectsAPIEndpoint + strings.Join([]string{projectID, "zones", zone, "instances", host}, "/") } // ToInstanceReferences returns instance references by links -func (gce *GCECloud) ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference) { +func 
(g *Cloud) ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference) { for _, ins := range instanceNames { - instanceLink := makeHostURL(gce.service.BasePath, gce.projectID, zone, ins) + instanceLink := makeHostURL(g.service.BasePath, g.projectID, zone, ins) refs = append(refs, &compute.InstanceReference{Instance: instanceLink}) } return refs } // NodeAddresses is an implementation of Instances.NodeAddresses. -func (gce *GCECloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.NodeAddress, error) { +func (g *Cloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.NodeAddress, error) { internalIP, err := metadata.Get("instance/network-interfaces/0/ip") if err != nil { return nil, fmt.Errorf("couldn't get internal IP: %v", err) @@ -97,7 +97,7 @@ func (gce *GCECloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.No } if internalDNSFull, err := metadata.Get("instance/hostname"); err != nil { - glog.Warningf("couldn't get full internal DNS name: %v", err) + klog.Warningf("couldn't get full internal DNS name: %v", err) } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: internalDNSFull}, @@ -109,7 +109,7 @@ func (gce *GCECloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.No // NodeAddressesByProviderID will not be called from the node that is requesting this ID. // i.e. metadata service and other local methods cannot be used here -func (gce *GCECloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { +func (g *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -118,7 +118,7 @@ func (gce *GCECloud) NodeAddressesByProviderID(ctx context.Context, providerID s return []v1.NodeAddress{}, err } - instance, err := gce.c.Instances().Get(ctx, meta.ZonalKey(canonicalizeInstanceName(name), zone)) + instance, err := g.c.Instances().Get(ctx, meta.ZonalKey(canonicalizeInstanceName(name), zone)) if err != nil { return []v1.NodeAddress{}, fmt.Errorf("error while querying for providerID %q: %v", providerID, err) } @@ -138,13 +138,13 @@ func (gce *GCECloud) NodeAddressesByProviderID(ctx context.Context, providerID s // instanceByProviderID returns the cloudprovider instance of the node // with the specified unique providerID -func (gce *GCECloud) instanceByProviderID(providerID string) (*gceInstance, error) { +func (g *Cloud) instanceByProviderID(providerID string) (*gceInstance, error) { project, zone, name, err := splitProviderID(providerID) if err != nil { return nil, err } - instance, err := gce.getInstanceFromProjectInZoneByName(project, zone, name) + instance, err := g.getInstanceFromProjectInZoneByName(project, zone, name) if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { return nil, cloudprovider.InstanceNotFound @@ -156,7 +156,7 @@ func (gce *GCECloud) instanceByProviderID(providerID string) (*gceInstance, erro } // InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes -func (gce *GCECloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { +func (g *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { return false, cloudprovider.NotImplemented } @@ -164,8 +164,8 @@ func (gce *GCECloud) InstanceShutdownByProviderID(ctx context.Context, providerI // with the specified unique providerID This method will not be 
called from the // node that is requesting this ID. i.e. metadata service and other local // methods cannot be used here -func (gce *GCECloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { - instance, err := gce.instanceByProviderID(providerID) +func (g *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { + instance, err := g.instanceByProviderID(providerID) if err != nil { return "", err } @@ -175,8 +175,8 @@ func (gce *GCECloud) InstanceTypeByProviderID(ctx context.Context, providerID st // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. -func (gce *GCECloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { - _, err := gce.instanceByProviderID(providerID) +func (g *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { + _, err := g.instanceByProviderID(providerID) if err != nil { if err == cloudprovider.InstanceNotFound { return false, nil @@ -188,51 +188,53 @@ func (gce *GCECloud) InstanceExistsByProviderID(ctx context.Context, providerID } // InstanceID returns the cloud provider ID of the node with the specified NodeName. -func (gce *GCECloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) { +func (g *Cloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) { instanceName := mapNodeNameToInstanceName(nodeName) - if gce.useMetadataServer { + if g.useMetadataServer { // Use metadata, if possible, to fetch ID. See issue #12000 - if gce.isCurrentInstance(instanceName) { + if g.isCurrentInstance(instanceName) { projectID, zone, err := getProjectAndZone() if err == nil { return projectID + "/" + zone + "/" + canonicalizeInstanceName(instanceName), nil } } } - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { return "", err } - return gce.projectID + "/" + instance.Zone + "/" + instance.Name, nil + return g.projectID + "/" + instance.Zone + "/" + instance.Name, nil } // InstanceType returns the type of the specified node with the specified NodeName. -func (gce *GCECloud) InstanceType(ctx context.Context, nodeName types.NodeName) (string, error) { +func (g *Cloud) InstanceType(ctx context.Context, nodeName types.NodeName) (string, error) { instanceName := mapNodeNameToInstanceName(nodeName) - if gce.useMetadataServer { + if g.useMetadataServer { // Use metadata, if possible, to fetch ID. 
See issue #12000 - if gce.isCurrentInstance(instanceName) { + if g.isCurrentInstance(instanceName) { mType, err := getCurrentMachineTypeViaMetadata() if err == nil { return mType, nil } } } - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { return "", err } return instance.Type, nil } -func (gce *GCECloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error { +// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances +// expected format for the key is standard ssh-keygen format: +func (g *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() return wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) { - project, err := gce.c.Projects().Get(ctx, gce.projectID) + project, err := g.c.Projects().Get(ctx, g.projectID) if err != nil { - glog.Errorf("Could not get project: %v", err) + klog.Errorf("Could not get project: %v", err) return false, nil } keyString := fmt.Sprintf("%s:%s %s@%s", user, strings.TrimSpace(string(keyData)), user, user) @@ -241,7 +243,7 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(ctx context.Context, user string, k if item.Key == "sshKeys" { if strings.Contains(*item.Value, keyString) { // We've already added the key - glog.Info("SSHKey already in project metadata") + klog.Info("SSHKey already in project metadata") return true, nil } value := *item.Value + "\n" + keyString @@ -252,7 +254,7 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(ctx context.Context, user string, k } if !found { // This is super unlikely, so log. - glog.Infof("Failed to find sshKeys metadata, creating a new item") + klog.Infof("Failed to find sshKeys metadata, creating a new item") project.CommonInstanceMetadata.Items = append(project.CommonInstanceMetadata.Items, &compute.MetadataItems{ Key: "sshKeys", @@ -261,31 +263,31 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(ctx context.Context, user string, k } mc := newInstancesMetricContext("add_ssh_key", "") - err = gce.c.Projects().SetCommonInstanceMetadata(ctx, gce.projectID, project.CommonInstanceMetadata) + err = g.c.Projects().SetCommonInstanceMetadata(ctx, g.projectID, project.CommonInstanceMetadata) mc.Observe(err) if err != nil { - glog.Errorf("Could not Set Metadata: %v", err) + klog.Errorf("Could not Set Metadata: %v", err) return false, nil } - glog.Infof("Successfully added sshKey to project metadata") + klog.Infof("Successfully added sshKey to project metadata") return true, nil }) } // GetAllCurrentZones returns all the zones in which k8s nodes are currently running -func (gce *GCECloud) GetAllCurrentZones() (sets.String, error) { - if gce.nodeInformerSynced == nil { - glog.Warningf("GCECloud object does not have informers set, should only happen in E2E binary.") - return gce.GetAllZonesFromCloudProvider() - } - gce.nodeZonesLock.Lock() - defer gce.nodeZonesLock.Unlock() - if !gce.nodeInformerSynced() { +func (g *Cloud) GetAllCurrentZones() (sets.String, error) { + if g.nodeInformerSynced == nil { + klog.Warningf("Cloud object does not have informers set, should only happen in E2E binary.") + return g.GetAllZonesFromCloudProvider() + } + g.nodeZonesLock.Lock() + defer g.nodeZonesLock.Unlock() + if !g.nodeInformerSynced() { return nil, fmt.Errorf("node informer is not synced when trying to GetAllCurrentZones") } zones := sets.NewString() - for zone, nodes := range gce.nodeZones { + for zone, nodes := 
range g.nodeZones { if len(nodes) > 0 { zones.Insert(zone) } @@ -300,13 +302,13 @@ func (gce *GCECloud) GetAllCurrentZones() (sets.String, error) { // a non-k8s compute in us-central1-a. This func will return a,b, and c. // // TODO: this should be removed from the cloud provider. -func (gce *GCECloud) GetAllZonesFromCloudProvider() (sets.String, error) { +func (g *Cloud) GetAllZonesFromCloudProvider() (sets.String, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() zones := sets.NewString() - for _, zone := range gce.managedZones { - instances, err := gce.c.Instances().List(ctx, zone, filter.None) + for _, zone := range g.managedZones { + instances, err := g.c.Instances().List(ctx, zone, filter.None) if err != nil { return sets.NewString(), err } @@ -318,22 +320,22 @@ func (gce *GCECloud) GetAllZonesFromCloudProvider() (sets.String, error) { } // InsertInstance creates a new instance on GCP -func (gce *GCECloud) InsertInstance(project string, zone string, i *compute.Instance) error { +func (g *Cloud) InsertInstance(project string, zone string, i *compute.Instance) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstancesMetricContext("create", zone) - return mc.Observe(gce.c.Instances().Insert(ctx, meta.ZonalKey(i.Name, zone), i)) + return mc.Observe(g.c.Instances().Insert(ctx, meta.ZonalKey(i.Name, zone), i)) } // ListInstanceNames returns a string of instance names separated by spaces. // This method should only be used for e2e testing. // TODO: remove this method. -func (gce *GCECloud) ListInstanceNames(project, zone string) (string, error) { +func (g *Cloud) ListInstanceNames(project, zone string) (string, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - l, err := gce.c.Instances().List(ctx, zone, filter.None) + l, err := g.c.Instances().List(ctx, zone, filter.None) if err != nil { return "", err } @@ -345,33 +347,34 @@ func (gce *GCECloud) ListInstanceNames(project, zone string) (string, error) { } // DeleteInstance deletes an instance specified by project, zone, and name -func (gce *GCECloud) DeleteInstance(project, zone, name string) error { +func (g *Cloud) DeleteInstance(project, zone, name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - return gce.c.Instances().Delete(ctx, meta.ZonalKey(name, zone)) + return g.c.Instances().Delete(ctx, meta.ZonalKey(name, zone)) } -// Implementation of Instances.CurrentNodeName -func (gce *GCECloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) { +// CurrentNodeName returns the name of the node we are currently running on +// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname +func (g *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) { return types.NodeName(hostname), nil } // AliasRanges returns a list of CIDR ranges that are assigned to the // `node` for allocation to pods. Returns a list of the form // "<ip>/<subnet>".
-func (gce *GCECloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err error) { +func (g *Cloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() var instance *gceInstance - instance, err = gce.getInstanceByName(mapNodeNameToInstanceName(nodeName)) + instance, err = g.getInstanceByName(mapNodeNameToInstanceName(nodeName)) if err != nil { return } var res *computebeta.Instance - res, err = gce.c.BetaInstances().Get(ctx, meta.ZonalKey(instance.Name, lastComponent(instance.Zone))) + res, err = g.c.BetaInstances().Get(ctx, meta.ZonalKey(instance.Name, lastComponent(instance.Zone))) if err != nil { return } @@ -386,15 +389,15 @@ func (gce *GCECloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err e // AddAliasToInstance adds an alias to the given instance from the named // secondary range. -func (gce *GCECloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNet) error { +func (g *Cloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNet) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - v1instance, err := gce.getInstanceByName(mapNodeNameToInstanceName(nodeName)) + v1instance, err := g.getInstanceByName(mapNodeNameToInstanceName(nodeName)) if err != nil { return err } - instance, err := gce.c.BetaInstances().Get(ctx, meta.ZonalKey(v1instance.Name, lastComponent(v1instance.Zone))) + instance, err := g.c.BetaInstances().Get(ctx, meta.ZonalKey(v1instance.Name, lastComponent(v1instance.Zone))) if err != nil { return err } @@ -404,7 +407,7 @@ func (gce *GCECloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNe return fmt.Errorf("instance %q has no network interfaces", nodeName) case 1: default: - glog.Warningf("Instance %q has more than one network interface, using only the first (%v)", + klog.Warningf("Instance %q has more than one network interface, using only the first (%v)", nodeName, instance.NetworkInterfaces) } @@ -413,38 +416,38 @@ func (gce *GCECloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNe iface.Fingerprint = instance.NetworkInterfaces[0].Fingerprint iface.AliasIpRanges = append(iface.AliasIpRanges, &computebeta.AliasIpRange{ IpCidrRange: alias.String(), - SubnetworkRangeName: gce.secondaryRangeName, + SubnetworkRangeName: g.secondaryRangeName, }) mc := newInstancesMetricContext("add_alias", v1instance.Zone) - err = gce.c.BetaInstances().UpdateNetworkInterface(ctx, meta.ZonalKey(instance.Name, lastComponent(instance.Zone)), iface.Name, iface) + err = g.c.BetaInstances().UpdateNetworkInterface(ctx, meta.ZonalKey(instance.Name, lastComponent(instance.Zone)), iface.Name, iface) return mc.Observe(err) } // Gets the named instances, returning cloudprovider.InstanceNotFound if any // instance is not found -func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) { +func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() found := map[string]*gceInstance{} remaining := len(names) - nodeInstancePrefix := gce.nodeInstancePrefix + nodeInstancePrefix := g.nodeInstancePrefix for _, name := range names { name = canonicalizeInstanceName(name) - if !strings.HasPrefix(name, gce.nodeInstancePrefix) { - glog.Warningf("Instance %q does not conform to prefix %q, removing filter", name, gce.nodeInstancePrefix) + if !strings.HasPrefix(name, g.nodeInstancePrefix) { + klog.Warningf("Instance %q does not conform to prefix %q, removing 
filter", name, g.nodeInstancePrefix) nodeInstancePrefix = "" } found[name] = nil } - for _, zone := range gce.managedZones { + for _, zone := range g.managedZones { if remaining == 0 { break } - instances, err := gce.c.Instances().List(ctx, zone, filter.Regexp("name", nodeInstancePrefix+".*")) + instances, err := g.c.Instances().List(ctx, zone, filter.Regexp("name", nodeInstancePrefix+".*")) if err != nil { return nil, err } @@ -456,7 +459,7 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) continue } if found[inst.Name] != nil { - glog.Errorf("Instance name %q was duplicated (in zone %q and %q)", inst.Name, zone, found[inst.Name].Zone) + klog.Errorf("Instance name %q was duplicated (in zone %q and %q)", inst.Name, zone, found[inst.Name].Zone) continue } found[inst.Name] = &gceInstance{ @@ -477,7 +480,7 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) failed = append(failed, k) } } - glog.Errorf("Failed to retrieve instances: %v", failed) + klog.Errorf("Failed to retrieve instances: %v", failed) return nil, cloudprovider.InstanceNotFound } @@ -490,15 +493,15 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) } // Gets the named instance, returning cloudprovider.InstanceNotFound if the instance is not found -func (gce *GCECloud) getInstanceByName(name string) (*gceInstance, error) { +func (g *Cloud) getInstanceByName(name string) (*gceInstance, error) { // Avoid changing behaviour when not managing multiple zones - for _, zone := range gce.managedZones { - instance, err := gce.getInstanceFromProjectInZoneByName(gce.projectID, zone, name) + for _, zone := range g.managedZones { + instance, err := g.getInstanceFromProjectInZoneByName(g.projectID, zone, name) if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { continue } - glog.Errorf("getInstanceByName: failed to get instance %s in zone %s; err: %v", name, zone, err) + klog.Errorf("getInstanceByName: failed to get instance %s in zone %s; err: %v", name, zone, err) return nil, err } return instance, nil @@ -507,13 +510,13 @@ func (gce *GCECloud) getInstanceByName(name string) (*gceInstance, error) { return nil, cloudprovider.InstanceNotFound } -func (gce *GCECloud) getInstanceFromProjectInZoneByName(project, zone, name string) (*gceInstance, error) { +func (g *Cloud) getInstanceFromProjectInZoneByName(project, zone, name string) (*gceInstance, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() name = canonicalizeInstanceName(name) mc := newInstancesMetricContext("get", zone) - res, err := gce.c.Instances().Get(ctx, meta.ZonalKey(name, zone)) + res, err := g.c.Instances().Get(ctx, meta.ZonalKey(name, zone)) mc.Observe(err) if err != nil { return nil, err @@ -554,11 +557,11 @@ func getCurrentMachineTypeViaMetadata() (string, error) { // isCurrentInstance uses metadata server to check if specified // instanceID matches current machine's instanceID -func (gce *GCECloud) isCurrentInstance(instanceID string) bool { +func (g *Cloud) isCurrentInstance(instanceID string) bool { currentInstanceID, err := getInstanceIDViaMetadata() if err != nil { // Log and swallow error - glog.Errorf("Failed to fetch instanceID via Metadata: %v", err) + klog.Errorf("Failed to fetch instanceID via Metadata: %v", err) return false } @@ -571,16 +574,16 @@ func (gce *GCECloud) isCurrentInstance(instanceID string) bool { // Invoking this method to get host tags is risky since it depends on the // format of the host names in the cluster. 
Only use it as a fallback if // gce.nodeTags is unspecified -func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) { +func (g *Cloud) computeHostTags(hosts []*gceInstance) ([]string, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() // TODO: We could store the tags in gceInstance, so we could have already fetched it hostNamesByZone := make(map[string]map[string]bool) // map of zones -> map of names -> bool (for easy lookup) - nodeInstancePrefix := gce.nodeInstancePrefix + nodeInstancePrefix := g.nodeInstancePrefix for _, host := range hosts { - if !strings.HasPrefix(host.Name, gce.nodeInstancePrefix) { - glog.Warningf("instance %v does not conform to prefix '%s', ignoring filter", host, gce.nodeInstancePrefix) + if !strings.HasPrefix(host.Name, g.nodeInstancePrefix) { + klog.Warningf("instance %v does not conform to prefix '%s', ignoring filter", host, g.nodeInstancePrefix) nodeInstancePrefix = "" } @@ -599,7 +602,7 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) { filt = filter.Regexp("name", nodeInstancePrefix+".*") } for zone, hostNames := range hostNamesByZone { - instances, err := gce.c.Instances().List(ctx, zone, filt) + instances, err := g.c.Instances().List(ctx, zone, filt) if err != nil { return nil, err } @@ -607,14 +610,14 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) { if !hostNames[instance.Name] { continue } - longest_tag := "" + longestTag := "" for _, tag := range instance.Tags.Items { - if strings.HasPrefix(instance.Name, tag) && len(tag) > len(longest_tag) { - longest_tag = tag + if strings.HasPrefix(instance.Name, tag) && len(tag) > len(longestTag) { + longestTag = tag } } - if len(longest_tag) > 0 { - tags.Insert(longest_tag) + if len(longestTag) > 0 { + tags.Insert(longestTag) } else { return nil, fmt.Errorf("could not find any tag that is a prefix of instance name for instance %s", instance.Name) } @@ -629,35 +632,35 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) { // GetNodeTags will first try returning the list of tags specified in GCE cloud Configuration. // If they weren't provided, it'll compute the host tags with the given hostnames. If the list // of hostnames has not changed, a cached set of nodetags are returned. -func (gce *GCECloud) GetNodeTags(nodeNames []string) ([]string, error) { +func (g *Cloud) GetNodeTags(nodeNames []string) ([]string, error) { // If nodeTags were specified through configuration, use them - if len(gce.nodeTags) > 0 { - return gce.nodeTags, nil + if len(g.nodeTags) > 0 { + return g.nodeTags, nil } - gce.computeNodeTagLock.Lock() - defer gce.computeNodeTagLock.Unlock() + g.computeNodeTagLock.Lock() + defer g.computeNodeTagLock.Unlock() // Early return if hosts have not changed hosts := sets.NewString(nodeNames...) 
- if hosts.Equal(gce.lastKnownNodeNames) { - return gce.lastComputedNodeTags, nil + if hosts.Equal(g.lastKnownNodeNames) { + return g.lastComputedNodeTags, nil } // Get GCE instance data by hostname - instances, err := gce.getInstancesByNames(nodeNames) + instances, err := g.getInstancesByNames(nodeNames) if err != nil { return nil, err } // Determine list of host tags - tags, err := gce.computeHostTags(instances) + tags, err := g.computeHostTags(instances) if err != nil { return nil, err } // Save the list of tags - gce.lastKnownNodeNames = hosts - gce.lastComputedNodeTags = tags + g.lastKnownNodeNames = hosts + g.lastComputedNodeTags = tags return tags, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go index 86eea9f87d4c6..35a2c6952f4ac 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go @@ -24,10 +24,10 @@ import ( "sort" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" netsets "k8s.io/kubernetes/pkg/util/net/sets" ) @@ -41,10 +41,6 @@ var ( lbSrcRngsFlag cidrs ) -func newLoadBalancerMetricContext(request, region string) *metricContext { - return newGenericMetricContext("loadbalancer", request, region, unusedMetricLabel, computeV1Version) -} - func init() { var err error // LB L7 proxies and all L3/4/7 health checkers have client addresses within these known CIDRs. @@ -91,9 +87,9 @@ func LoadBalancerSrcRanges() []string { } // GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer -func (gce *GCECloud) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) { - loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc) - fwd, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region) +func (g *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) { + loadBalancerName := g.GetLoadBalancerName(ctx, clusterName, svc) + fwd, err := g.GetRegionForwardingRule(loadBalancerName, g.region) if err == nil { status := &v1.LoadBalancerStatus{} status.Ingress = []v1.LoadBalancerIngress{{IP: fwd.IPAddress}} @@ -104,23 +100,23 @@ func (gce *GCECloud) GetLoadBalancer(ctx context.Context, clusterName string, sv } // GetLoadBalancerName is an implementation of LoadBalancer.GetLoadBalancerName. -func (gce *GCECloud) GetLoadBalancerName(ctx context.Context, clusterName string, svc *v1.Service) string { +func (g *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, svc *v1.Service) string { // TODO: replace DefaultLoadBalancerName to generate more meaningful loadbalancer names. return cloudprovider.DefaultLoadBalancerName(svc) } // EnsureLoadBalancer is an implementation of LoadBalancer.EnsureLoadBalancer. 
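The tag selection in computeHostTags above keeps, for each instance, the longest tag that is a prefix of the instance name, and fails if no tag qualifies. Restated as a small stand-alone helper for clarity (the function name and sample values are invented, not part of the vendored code):

package gce

import "strings"

// pickLongestPrefixTag mirrors the selection rule in computeHostTags: keep the
// longest tag that prefixes the instance name, or report that none matched.
// For an instance "gke-test-default-pool-abc123" tagged with "gke-test" and
// "gke-test-default-pool", it returns "gke-test-default-pool".
func pickLongestPrefixTag(instanceName string, tags []string) (string, bool) {
	longest := ""
	for _, tag := range tags {
		if strings.HasPrefix(instanceName, tag) && len(tag) > len(longest) {
			longest = tag
		}
	}
	return longest, longest != ""
}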
-func (gce *GCECloud) EnsureLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { - loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc) +func (g *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { + loadBalancerName := g.GetLoadBalancerName(ctx, clusterName, svc) desiredScheme := getSvcScheme(svc) - clusterID, err := gce.ClusterID.GetID() + clusterID, err := g.ClusterID.GetID() if err != nil { return nil, err } - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): ensure %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, desiredScheme) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): ensure %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, desiredScheme) - existingFwdRule, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region) + existingFwdRule, err := g.GetRegionForwardingRule(loadBalancerName, g.region) if err != nil && !isNotFound(err) { return nil, err } @@ -130,14 +126,14 @@ func (gce *GCECloud) EnsureLoadBalancer(ctx context.Context, clusterName string, // If the loadbalancer type changes between INTERNAL and EXTERNAL, the old load balancer should be deleted. if existingScheme != desiredScheme { - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): deleting existing %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, existingScheme) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): deleting existing %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, existingScheme) switch existingScheme { case cloud.SchemeInternal: - err = gce.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc) + err = g.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc) default: - err = gce.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc) + err = g.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc) } - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done deleting existing %v loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, existingScheme, err) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done deleting existing %v loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, existingScheme, err) if err != nil { return nil, err } @@ -150,53 +146,53 @@ func (gce *GCECloud) EnsureLoadBalancer(ctx context.Context, clusterName string, var status *v1.LoadBalancerStatus switch desiredScheme { case cloud.SchemeInternal: - status, err = gce.ensureInternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes) + status, err = g.ensureInternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes) default: - status, err = gce.ensureExternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes) + status, err = g.ensureExternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes) } - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done ensuring loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done ensuring loadbalancer. 
err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) return status, err } // UpdateLoadBalancer is an implementation of LoadBalancer.UpdateLoadBalancer. -func (gce *GCECloud) UpdateLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) error { - loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc) +func (g *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) error { + loadBalancerName := g.GetLoadBalancerName(ctx, clusterName, svc) scheme := getSvcScheme(svc) - clusterID, err := gce.ClusterID.GetID() + clusterID, err := g.ClusterID.GetID() if err != nil { return err } - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): updating with %d nodes", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, len(nodes)) + klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): updating with %d nodes", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, len(nodes)) switch scheme { case cloud.SchemeInternal: - err = gce.updateInternalLoadBalancer(clusterName, clusterID, svc, nodes) + err = g.updateInternalLoadBalancer(clusterName, clusterID, svc, nodes) default: - err = gce.updateExternalLoadBalancer(clusterName, svc, nodes) + err = g.updateExternalLoadBalancer(clusterName, svc, nodes) } - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): done updating. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err) + klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): done updating. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) return err } // EnsureLoadBalancerDeleted is an implementation of LoadBalancer.EnsureLoadBalancerDeleted. -func (gce *GCECloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, svc *v1.Service) error { - loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc) +func (g *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, svc *v1.Service) error { + loadBalancerName := g.GetLoadBalancerName(ctx, clusterName, svc) scheme := getSvcScheme(svc) - clusterID, err := gce.ClusterID.GetID() + clusterID, err := g.ClusterID.GetID() if err != nil { return err } - glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): deleting loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region) + klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): deleting loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region) switch scheme { case cloud.SchemeInternal: - err = gce.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc) + err = g.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc) default: - err = gce.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc) + err = g.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc) } - glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): done deleting loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err) + klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): done deleting loadbalancer. 
err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go index 4efea8ab97492..6b92e71ef77f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go @@ -31,9 +31,9 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" netsets "k8s.io/kubernetes/pkg/util/net/sets" - "github.com/golang/glog" computealpha "google.golang.org/api/compute/v0.alpha" compute "google.golang.org/api/compute/v1" + "k8s.io/klog" ) // ensureExternalLoadBalancer is the external implementation of LoadBalancer.EnsureLoadBalancer. @@ -44,19 +44,19 @@ import ( // Due to an interesting series of design decisions, this handles both creating // new load balancers and updating existing load balancers, recognizing when // each is needed. -func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID string, apiService *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { +func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, apiService *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { if len(nodes) == 0 { return nil, fmt.Errorf("Cannot EnsureLoadBalancer() with no hosts") } hostNames := nodeNames(nodes) supportsNodesHealthCheck := supportsNodesHealthCheck(nodes) - hosts, err := gce.getInstancesByNames(hostNames) + hosts, err := g.getInstancesByNames(hostNames) if err != nil { return nil, err } - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, apiService) + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, apiService) requestedIP := apiService.Spec.LoadBalancerIP ports := apiService.Spec.Ports portStr := []string{} @@ -66,27 +66,27 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name} lbRefStr := fmt.Sprintf("%v(%v)", loadBalancerName, serviceName) - glog.V(2).Infof("ensureExternalLoadBalancer(%s, %v, %v, %v, %v, %v)", lbRefStr, gce.region, requestedIP, portStr, hostNames, apiService.Annotations) + klog.V(2).Infof("ensureExternalLoadBalancer(%s, %v, %v, %v, %v, %v)", lbRefStr, g.region, requestedIP, portStr, hostNames, apiService.Annotations) // Check the current and the desired network tiers. If they do not match, // tear down the existing resources with the wrong tier. 
- netTier, err := gce.getServiceNetworkTier(apiService) + netTier, err := g.getServiceNetworkTier(apiService) if err != nil { - glog.Errorf("ensureExternalLoadBalancer(%s): Failed to get the desired network tier: %v.", lbRefStr, err) + klog.Errorf("ensureExternalLoadBalancer(%s): Failed to get the desired network tier: %v.", lbRefStr, err) return nil, err } - glog.V(4).Infof("ensureExternalLoadBalancer(%s): Desired network tier %q.", lbRefStr, netTier) - if gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { - gce.deleteWrongNetworkTieredResources(loadBalancerName, lbRefStr, netTier) + klog.V(4).Infof("ensureExternalLoadBalancer(%s): Desired network tier %q.", lbRefStr, netTier) + if g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { + g.deleteWrongNetworkTieredResources(loadBalancerName, lbRefStr, netTier) } // Check if the forwarding rule exists, and if so, what its IP is. - fwdRuleExists, fwdRuleNeedsUpdate, fwdRuleIP, err := gce.forwardingRuleNeedsUpdate(loadBalancerName, gce.region, requestedIP, ports) + fwdRuleExists, fwdRuleNeedsUpdate, fwdRuleIP, err := g.forwardingRuleNeedsUpdate(loadBalancerName, g.region, requestedIP, ports) if err != nil { return nil, err } if !fwdRuleExists { - glog.V(2).Infof("ensureExternalLoadBalancer(%s): Forwarding rule %v doesn't exist.", lbRefStr, loadBalancerName) + klog.V(2).Infof("ensureExternalLoadBalancer(%s): Forwarding rule %v doesn't exist.", lbRefStr, loadBalancerName) } // Make sure we know which IP address will be used and have properly reserved @@ -120,22 +120,22 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st return } if isSafeToReleaseIP { - if err := gce.DeleteRegionAddress(loadBalancerName, gce.region); err != nil && !isNotFound(err) { - glog.Errorf("ensureExternalLoadBalancer(%s): Failed to release static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, gce.region, err) + if err := g.DeleteRegionAddress(loadBalancerName, g.region); err != nil && !isNotFound(err) { + klog.Errorf("ensureExternalLoadBalancer(%s): Failed to release static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, g.region, err) } else if isNotFound(err) { - glog.V(2).Infof("ensureExternalLoadBalancer(%s): IP address %s is not reserved.", lbRefStr, ipAddressToUse) + klog.V(2).Infof("ensureExternalLoadBalancer(%s): IP address %s is not reserved.", lbRefStr, ipAddressToUse) } else { - glog.Infof("ensureExternalLoadBalancer(%s): Released static IP %s.", lbRefStr, ipAddressToUse) + klog.Infof("ensureExternalLoadBalancer(%s): Released static IP %s.", lbRefStr, ipAddressToUse) } } else { - glog.Warningf("ensureExternalLoadBalancer(%s): Orphaning static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, gce.region, err) + klog.Warningf("ensureExternalLoadBalancer(%s): Orphaning static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, g.region, err) } }() if requestedIP != "" { // If user requests a specific IP address, verify first. No mutation to // the GCE resources will be performed in the verification process. - isUserOwnedIP, err = verifyUserRequestedIP(gce, gce.region, requestedIP, fwdRuleIP, lbRefStr, netTier) + isUserOwnedIP, err = verifyUserRequestedIP(g, g.region, requestedIP, fwdRuleIP, lbRefStr, netTier) if err != nil { return nil, err } @@ -145,11 +145,11 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st if !isUserOwnedIP { // If we are not using the user-owned IP, either promote the // emphemeral IP used by the fwd rule, or create a new static IP. 
- ipAddr, existed, err := ensureStaticIP(gce, loadBalancerName, serviceName.String(), gce.region, fwdRuleIP, netTier) + ipAddr, existed, err := ensureStaticIP(g, loadBalancerName, serviceName.String(), g.region, fwdRuleIP, netTier) if err != nil { return nil, fmt.Errorf("failed to ensure a static IP for load balancer (%s): %v", lbRefStr, err) } - glog.Infof("ensureExternalLoadBalancer(%s): Ensured IP address %s (tier: %s).", lbRefStr, ipAddr, netTier) + klog.Infof("ensureExternalLoadBalancer(%s): Ensured IP address %s (tier: %s).", lbRefStr, ipAddr, netTier) // If the IP was not owned by the user, but it already existed, it // could indicate that the previous update cycle failed. We can use // this IP and try to run through the process again, but we should @@ -167,7 +167,7 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st return nil, err } - firewallExists, firewallNeedsUpdate, err := gce.firewallNeedsUpdate(loadBalancerName, serviceName.String(), gce.region, ipAddressToUse, ports, sourceRanges) + firewallExists, firewallNeedsUpdate, err := g.firewallNeedsUpdate(loadBalancerName, serviceName.String(), g.region, ipAddressToUse, ports, sourceRanges) if err != nil { return nil, err } @@ -177,60 +177,60 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st // Unlike forwarding rules and target pools, firewalls can be updated // without needing to be deleted and recreated. if firewallExists { - glog.Infof("ensureExternalLoadBalancer(%s): Updating firewall.", lbRefStr) - if err := gce.updateFirewall(apiService, MakeFirewallName(loadBalancerName), gce.region, desc, sourceRanges, ports, hosts); err != nil { + klog.Infof("ensureExternalLoadBalancer(%s): Updating firewall.", lbRefStr) + if err := g.updateFirewall(apiService, MakeFirewallName(loadBalancerName), g.region, desc, sourceRanges, ports, hosts); err != nil { return nil, err } - glog.Infof("ensureExternalLoadBalancer(%s): Updated firewall.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Updated firewall.", lbRefStr) } else { - glog.Infof("ensureExternalLoadBalancer(%s): Creating firewall.", lbRefStr) - if err := gce.createFirewall(apiService, MakeFirewallName(loadBalancerName), gce.region, desc, sourceRanges, ports, hosts); err != nil { + klog.Infof("ensureExternalLoadBalancer(%s): Creating firewall.", lbRefStr) + if err := g.createFirewall(apiService, MakeFirewallName(loadBalancerName), g.region, desc, sourceRanges, ports, hosts); err != nil { return nil, err } - glog.Infof("ensureExternalLoadBalancer(%s): Created firewall.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Created firewall.", lbRefStr) } } - tpExists, tpNeedsRecreation, err := gce.targetPoolNeedsRecreation(loadBalancerName, gce.region, apiService.Spec.SessionAffinity) + tpExists, tpNeedsRecreation, err := g.targetPoolNeedsRecreation(loadBalancerName, g.region, apiService.Spec.SessionAffinity) if err != nil { return nil, err } if !tpExists { - glog.Infof("ensureExternalLoadBalancer(%s): Target pool for service doesn't exist.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Target pool for service doesn't exist.", lbRefStr) } // Check which health check needs to create and which health check needs to delete. // Health check management is coupled with target pool operation to prevent leaking. 
var hcToCreate, hcToDelete *compute.HttpHealthCheck - hcLocalTrafficExisting, err := gce.GetHttpHealthCheck(loadBalancerName) + hcLocalTrafficExisting, err := g.GetHTTPHealthCheck(loadBalancerName) if err != nil && !isHTTPErrorCode(err, http.StatusNotFound) { return nil, fmt.Errorf("error checking HTTP health check for load balancer (%s): %v", lbRefStr, err) } if path, healthCheckNodePort := apiservice.GetServiceHealthCheckPathPort(apiService); path != "" { - glog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs local traffic health checks on: %d%s.", lbRefStr, healthCheckNodePort, path) + klog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs local traffic health checks on: %d%s.", lbRefStr, healthCheckNodePort, path) if hcLocalTrafficExisting == nil { // This logic exists to detect a transition for non-OnlyLocal to OnlyLocal service // turn on the tpNeedsRecreation flag to delete/recreate fwdrule/tpool updating the // target pool to use local traffic health check. - glog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from nodes health checks to local traffic health checks.", lbRefStr) + klog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from nodes health checks to local traffic health checks.", lbRefStr) if supportsNodesHealthCheck { - hcToDelete = makeHttpHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) + hcToDelete = makeHTTPHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) } tpNeedsRecreation = true } - hcToCreate = makeHttpHealthCheck(loadBalancerName, path, healthCheckNodePort) + hcToCreate = makeHTTPHealthCheck(loadBalancerName, path, healthCheckNodePort) } else { - glog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs nodes health checks.", lbRefStr) + klog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs nodes health checks.", lbRefStr) if hcLocalTrafficExisting != nil { // This logic exists to detect a transition from OnlyLocal to non-OnlyLocal service // and turn on the tpNeedsRecreation flag to delete/recreate fwdrule/tpool updating the // target pool to use nodes health check. - glog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from local traffic health checks to nodes health checks.", lbRefStr) + klog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from local traffic health checks to nodes health checks.", lbRefStr) hcToDelete = hcLocalTrafficExisting tpNeedsRecreation = true } if supportsNodesHealthCheck { - hcToCreate = makeHttpHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) + hcToCreate = makeHTTPHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) } } // Now we get to some slightly more interesting logic. @@ -245,19 +245,19 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st // and something should fail before we recreate it, don't release the // IP. That way we can come back to it later. 
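The health-check selection above is effectively a small decision table: a Service that declares a health-check path (externalTrafficPolicy=Local) gets a per-load-balancer check, every other Service shares the per-cluster nodes check, and crossing that boundary in either direction marks the target pool for recreation and the old check for deletion. The sketch below restates that table; the hcDecision type and decideHealthChecks name are hypothetical, only the rules follow the diff.

```go
package main

import "fmt"

// hcDecision is a hypothetical summary of what ensureExternalLoadBalancer
// derives before touching the target pool.
type hcDecision struct {
	createLocalCheck   bool // create/keep a per-LB health check (OnlyLocal service)
	createNodesCheck   bool // create/keep the shared nodes health check
	deleteOldCheck     bool // the existing check belongs to the other mode
	recreateTargetPool bool
}

// decideHealthChecks mirrors the transition logic: wantsLocal reports whether
// the Service declares a health-check path, hasLocalCheck whether a per-LB
// HTTP health check already exists, and supportsNodesHC whether the shared
// nodes check is usable for these nodes.
func decideHealthChecks(wantsLocal, hasLocalCheck, supportsNodesHC bool) hcDecision {
	var d hcDecision
	switch {
	case wantsLocal && !hasLocalCheck:
		// Transition cluster -> local: recreate the target pool, drop the shared check.
		d.createLocalCheck = true
		d.deleteOldCheck = supportsNodesHC
		d.recreateTargetPool = true
	case wantsLocal && hasLocalCheck:
		d.createLocalCheck = true
	case !wantsLocal && hasLocalCheck:
		// Transition local -> cluster: recreate the target pool, drop the per-LB check.
		d.createNodesCheck = supportsNodesHC
		d.deleteOldCheck = true
		d.recreateTargetPool = true
	default:
		d.createNodesCheck = supportsNodesHC
	}
	return d
}

func main() {
	fmt.Printf("%+v\n", decideHealthChecks(true, false, true))  // cluster -> local transition
	fmt.Printf("%+v\n", decideHealthChecks(false, true, true))  // local -> cluster transition
}
```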
isSafeToReleaseIP = false - if err := gce.DeleteRegionForwardingRule(loadBalancerName, gce.region); err != nil && !isNotFound(err) { + if err := g.DeleteRegionForwardingRule(loadBalancerName, g.region); err != nil && !isNotFound(err) { return nil, fmt.Errorf("failed to delete existing forwarding rule for load balancer (%s) update: %v", lbRefStr, err) } - glog.Infof("ensureExternalLoadBalancer(%s): Deleted forwarding rule.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Deleted forwarding rule.", lbRefStr) } - if err := gce.ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation, apiService, loadBalancerName, clusterID, ipAddressToUse, hosts, hcToCreate, hcToDelete); err != nil { + if err := g.ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation, apiService, loadBalancerName, clusterID, ipAddressToUse, hosts, hcToCreate, hcToDelete); err != nil { return nil, err } if tpNeedsRecreation || fwdRuleNeedsUpdate { - glog.Infof("ensureExternalLoadBalancer(%s): Creating forwarding rule, IP %s (tier: %s).", lbRefStr, ipAddressToUse, netTier) - if err := createForwardingRule(gce, loadBalancerName, serviceName.String(), gce.region, ipAddressToUse, gce.targetPoolURL(loadBalancerName), ports, netTier); err != nil { + klog.Infof("ensureExternalLoadBalancer(%s): Creating forwarding rule, IP %s (tier: %s).", lbRefStr, ipAddressToUse, netTier) + if err := createForwardingRule(g, loadBalancerName, serviceName.String(), g.region, ipAddressToUse, g.targetPoolURL(loadBalancerName), ports, netTier); err != nil { return nil, fmt.Errorf("failed to create forwarding rule for load balancer (%s): %v", lbRefStr, err) } // End critical section. It is safe to release the static IP (which @@ -265,7 +265,7 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st // of a user-requested IP, the "is user-owned" flag will be set, // preventing it from actually being released. isSafeToReleaseIP = true - glog.Infof("ensureExternalLoadBalancer(%s): Created forwarding rule, IP %s.", lbRefStr, ipAddressToUse) + klog.Infof("ensureExternalLoadBalancer(%s): Created forwarding rule, IP %s.", lbRefStr, ipAddressToUse) } status := &v1.LoadBalancerStatus{} @@ -275,27 +275,27 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st } // updateExternalLoadBalancer is the external implementation of LoadBalancer.UpdateLoadBalancer. 
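The static-IP handling in ensureExternalLoadBalancer relies on two flags checked in a deferred cleanup: a user-owned address is never touched, and a cloud-allocated reservation is only released once the forwarding rule has been created, otherwise it is deliberately orphaned so a retry can reuse the same address. A simplified sketch of that guard pattern, with hypothetical releaseIP and provisionWithIP helpers standing in for the GCE calls:

```go
package main

import (
	"fmt"
	"log"
)

// releaseIP is a hypothetical stand-in for DeleteRegionAddress.
func releaseIP(name, region string) error {
	log.Printf("released static IP reservation %q in %s", name, region)
	return nil
}

// provisionWithIP sketches the guard pattern: the deferred release only runs
// when the IP is not user-owned and the critical section completed, so a
// failed update keeps the reservation for the next reconcile.
func provisionWithIP(lbName, region string, createForwardingRule func(ip string) error) error {
	isUserOwnedIP := false    // would be set after verifying a user-requested IP
	isSafeToReleaseIP := false

	defer func() {
		if isUserOwnedIP {
			return // never touch an address the user reserved themselves
		}
		if isSafeToReleaseIP {
			if err := releaseIP(lbName, region); err != nil {
				log.Printf("failed to release %q: %v", lbName, err)
			}
		} else {
			log.Printf("orphaning static IP for %q so a retry can reuse it", lbName)
		}
	}()

	ip := "203.0.113.10" // would come from ensureStaticIP or the Service spec
	if err := createForwardingRule(ip); err != nil {
		return err // reservation intentionally kept: isSafeToReleaseIP is still false
	}
	// End of critical section: the forwarding rule now holds the address,
	// so the temporary reservation can be released.
	isSafeToReleaseIP = true
	return nil
}

func main() {
	err := provisionWithIP("a123", "us-central1", func(ip string) error {
		fmt.Println("forwarding rule created with", ip)
		return nil
	})
	fmt.Println("err:", err)
}
```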
-func (gce *GCECloud) updateExternalLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error { - hosts, err := gce.getInstancesByNames(nodeNames(nodes)) +func (g *Cloud) updateExternalLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error { + hosts, err := g.getInstancesByNames(nodeNames(nodes)) if err != nil { return err } - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, service) - return gce.updateTargetPool(loadBalancerName, hosts) + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, service) + return g.updateTargetPool(loadBalancerName, hosts) } // ensureExternalLoadBalancerDeleted is the external implementation of LoadBalancer.EnsureLoadBalancerDeleted -func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID string, service *v1.Service) error { - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, service) +func (g *Cloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID string, service *v1.Service) error { + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, service) serviceName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} lbRefStr := fmt.Sprintf("%v(%v)", loadBalancerName, serviceName) var hcNames []string if path, _ := apiservice.GetServiceHealthCheckPathPort(service); path != "" { - hcToDelete, err := gce.GetHttpHealthCheck(loadBalancerName) + hcToDelete, err := g.GetHTTPHealthCheck(loadBalancerName) if err != nil && !isHTTPErrorCode(err, http.StatusNotFound) { - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Failed to retrieve health check:%v.", lbRefStr, err) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Failed to retrieve health check:%v.", lbRefStr, err) return err } // If we got 'StatusNotFound' LB was already deleted and it's safe to ignore. @@ -313,12 +313,12 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID st errs := utilerrors.AggregateGoroutines( func() error { - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting firewall rule.", lbRefStr) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting firewall rule.", lbRefStr) fwName := MakeFirewallName(loadBalancerName) - err := ignoreNotFound(gce.DeleteFirewall(fwName)) - if isForbidden(err) && gce.OnXPN() { - glog.V(4).Infof("ensureExternalLoadBalancerDeleted(%s): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName) - gce.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, gce.NetworkProjectID())) + err := ignoreNotFound(g.DeleteFirewall(fwName)) + if isForbidden(err) && g.OnXPN() { + klog.V(4).Infof("ensureExternalLoadBalancerDeleted(%s): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName) + g.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, g.NetworkProjectID())) return nil } return err @@ -327,18 +327,18 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID st // possible that EnsureLoadBalancer left one around in a failed // creation/update attempt, so make sure we clean it up here just in case. 
func() error { - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting IP address.", lbRefStr) - return ignoreNotFound(gce.DeleteRegionAddress(loadBalancerName, gce.region)) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting IP address.", lbRefStr) + return ignoreNotFound(g.DeleteRegionAddress(loadBalancerName, g.region)) }, func() error { - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting forwarding rule.", lbRefStr) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting forwarding rule.", lbRefStr) // The forwarding rule must be deleted before either the target pool can, // unfortunately, so we have to do these two serially. - if err := ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil { + if err := ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil { return err } - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting target pool.", lbRefStr) - if err := gce.DeleteExternalTargetPoolAndChecks(service, loadBalancerName, gce.region, clusterID, hcNames...); err != nil { + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting target pool.", lbRefStr) + if err := g.DeleteExternalTargetPoolAndChecks(service, loadBalancerName, g.region, clusterID, hcNames...); err != nil { return err } return nil @@ -350,14 +350,15 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID st return nil } -func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name, region, clusterID string, hcNames ...string) error { +// DeleteExternalTargetPoolAndChecks Deletes an external load balancer pool and verifies the operation +func (g *Cloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name, region, clusterID string, hcNames ...string) error { serviceName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} lbRefStr := fmt.Sprintf("%v(%v)", name, serviceName) - if err := gce.DeleteTargetPool(name, region); err != nil && isHTTPErrorCode(err, http.StatusNotFound) { - glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Target pool already deleted. Continuing to delete other resources.", lbRefStr) + if err := g.DeleteTargetPool(name, region); err != nil && isHTTPErrorCode(err, http.StatusNotFound) { + klog.Infof("DeleteExternalTargetPoolAndChecks(%v): Target pool already deleted. Continuing to delete other resources.", lbRefStr) } else if err != nil { - glog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete target pool, got error %s.", lbRefStr, err.Error()) + klog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete target pool, got error %s.", lbRefStr, err.Error()) return err } @@ -369,17 +370,17 @@ func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name if isNodesHealthCheck { // Lock to prevent deleting necessary nodes health check before it gets attached // to target pool. - gce.sharedResourceLock.Lock() - defer gce.sharedResourceLock.Unlock() + g.sharedResourceLock.Lock() + defer g.sharedResourceLock.Unlock() } - glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check %v.", lbRefStr, hcName) - if err := gce.DeleteHttpHealthCheck(hcName); err != nil { + klog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check %v.", lbRefStr, hcName) + if err := g.DeleteHTTPHealthCheck(hcName); err != nil { // Delete nodes health checks will fail if any other target pool is using it. 
if isInUsedByError(err) { - glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is in used: %v.", lbRefStr, hcName, err) + klog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is in used: %v.", lbRefStr, hcName, err) return nil } else if !isHTTPErrorCode(err, http.StatusNotFound) { - glog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete health check %v: %v.", lbRefStr, hcName, err) + klog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete health check %v: %v.", lbRefStr, hcName, err) return err } // StatusNotFound could happen when: @@ -389,16 +390,16 @@ func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name // - This is a retry and in previous round we failed to delete the healthcheck firewall // after deleted the healthcheck. // We continue to delete the healthcheck firewall to prevent leaking. - glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is already deleted.", lbRefStr, hcName) + klog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is already deleted.", lbRefStr, hcName) } // If health check is deleted without error, it means no load-balancer is using it. // So we should delete the health check firewall as well. fwName := MakeHealthCheckFirewallName(clusterID, hcName, isNodesHealthCheck) - glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check firewall %v.", lbRefStr, fwName) - if err := ignoreNotFound(gce.DeleteFirewall(fwName)); err != nil { - if isForbidden(err) && gce.OnXPN() { - glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName) - gce.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, gce.NetworkProjectID())) + klog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check firewall %v.", lbRefStr, fwName) + if err := ignoreNotFound(g.DeleteFirewall(fwName)); err != nil { + if isForbidden(err) && g.OnXPN() { + klog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName) + g.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, g.NetworkProjectID())) return nil } return err @@ -428,7 +429,7 @@ func verifyUserRequestedIP(s CloudAddressService, region, requestedIP, fwdRuleIP // case we shouldn't delete it anyway). 
existingAddress, err := s.GetRegionAddressByIP(region, requestedIP) if err != nil && !isNotFound(err) { - glog.Errorf("verifyUserRequestedIP: failed to check whether the requested IP %q for LB %s exists: %v", requestedIP, lbRef, err) + klog.Errorf("verifyUserRequestedIP: failed to check whether the requested IP %q for LB %s exists: %v", requestedIP, lbRef, err) return false, err } if err == nil { @@ -442,27 +443,27 @@ func verifyUserRequestedIP(s CloudAddressService, region, requestedIP, fwdRuleIP } netTier := cloud.NetworkTierGCEValueToType(netTierStr) if netTier != desiredNetTier { - glog.Errorf("verifyUserRequestedIP: requested static IP %q (name: %s) for LB %s has network tier %s, need %s.", requestedIP, existingAddress.Name, lbRef, netTier, desiredNetTier) + klog.Errorf("verifyUserRequestedIP: requested static IP %q (name: %s) for LB %s has network tier %s, need %s.", requestedIP, existingAddress.Name, lbRef, netTier, desiredNetTier) return false, fmt.Errorf("requrested IP %q belongs to the %s network tier; expected %s", requestedIP, netTier, desiredNetTier) } - glog.V(4).Infof("verifyUserRequestedIP: the requested static IP %q (name: %s, tier: %s) for LB %s exists.", requestedIP, existingAddress.Name, netTier, lbRef) + klog.V(4).Infof("verifyUserRequestedIP: the requested static IP %q (name: %s, tier: %s) for LB %s exists.", requestedIP, existingAddress.Name, netTier, lbRef) return true, nil } if requestedIP == fwdRuleIP { // The requested IP is not a static IP, but is currently assigned // to this forwarding rule, so we can just use it. - glog.V(4).Infof("verifyUserRequestedIP: the requested IP %q is not static, but is currently in use by for LB %s", requestedIP, lbRef) + klog.V(4).Infof("verifyUserRequestedIP: the requested IP %q is not static, but is currently in use by for LB %s", requestedIP, lbRef) return false, nil } // The requested IP is not static and it is not assigned to the // current forwarding rule. It might be attached to a different // rule or it might not be part of this project at all. Either // way, we can't use it. 
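verifyUserRequestedIP boils down to three outcomes: the requested address is a reserved static IP owned by the user (and must sit on the desired network tier), it is the ephemeral address already attached to this forwarding rule, or it cannot be used. A condensed sketch of that ordering, with plain booleans standing in for the GetRegionAddressByIP lookup and the tier query:

```go
package main

import "fmt"

// ipVerdict is a hypothetical summary of verifyUserRequestedIP's result.
type ipVerdict struct {
	userOwned bool
	err       error
}

// verifyRequestedIP mirrors the decision order in the diff: a reserved static
// address wins (tier permitting), the forwarding rule's current ephemeral
// address is tolerated, anything else is rejected.
func verifyRequestedIP(requestedIP, fwdRuleIP, reservedTier, desiredTier string, isReserved bool) ipVerdict {
	if requestedIP == "" {
		return ipVerdict{} // nothing requested, nothing to verify
	}
	if isReserved {
		if reservedTier != desiredTier {
			return ipVerdict{err: fmt.Errorf("requested IP %s is on tier %s, need %s", requestedIP, reservedTier, desiredTier)}
		}
		return ipVerdict{userOwned: true}
	}
	if requestedIP == fwdRuleIP {
		return ipVerdict{} // not static, but already in use by this LB's forwarding rule
	}
	return ipVerdict{err: fmt.Errorf("requested IP %s is neither static nor assigned to the LB", requestedIP)}
}

func main() {
	fmt.Println(verifyRequestedIP("203.0.113.10", "", "PREMIUM", "PREMIUM", true))
	fmt.Println(verifyRequestedIP("203.0.113.11", "203.0.113.11", "", "PREMIUM", false))
	fmt.Println(verifyRequestedIP("203.0.113.12", "203.0.113.99", "", "PREMIUM", false))
}
```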
- glog.Errorf("verifyUserRequestedIP: requested IP %q for LB %s is neither static nor assigned to the LB", requestedIP, lbRef) + klog.Errorf("verifyUserRequestedIP: requested IP %q for LB %s is neither static nor assigned to the LB", requestedIP, lbRef) return false, fmt.Errorf("requested ip %q is neither static nor assigned to the LB", requestedIP) } -func (gce *GCECloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation bool, svc *v1.Service, loadBalancerName, clusterID, ipAddressToUse string, hosts []*gceInstance, hcToCreate, hcToDelete *compute.HttpHealthCheck) error { +func (g *Cloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation bool, svc *v1.Service, loadBalancerName, clusterID, ipAddressToUse string, hosts []*gceInstance, hcToCreate, hcToDelete *compute.HttpHealthCheck) error { serviceName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name} lbRefStr := fmt.Sprintf("%v(%v)", loadBalancerName, serviceName) @@ -472,10 +473,10 @@ func (gce *GCECloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation if hcToDelete != nil { hcNames = append(hcNames, hcToDelete.Name) } - if err := gce.DeleteExternalTargetPoolAndChecks(svc, loadBalancerName, gce.region, clusterID, hcNames...); err != nil { + if err := g.DeleteExternalTargetPoolAndChecks(svc, loadBalancerName, g.region, clusterID, hcNames...); err != nil { return fmt.Errorf("failed to delete existing target pool for load balancer (%s) update: %v", lbRefStr, err) } - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Deleted target pool.", lbRefStr) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Deleted target pool.", lbRefStr) } // Once we've deleted the resources (if necessary), build them back up (or for // the first time if they're new). @@ -484,40 +485,40 @@ func (gce *GCECloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation if len(hosts) > maxTargetPoolCreateInstances { createInstances = createInstances[:maxTargetPoolCreateInstances] } - if err := gce.createTargetPoolAndHealthCheck(svc, loadBalancerName, serviceName.String(), ipAddressToUse, gce.region, clusterID, createInstances, hcToCreate); err != nil { + if err := g.createTargetPoolAndHealthCheck(svc, loadBalancerName, serviceName.String(), ipAddressToUse, g.region, clusterID, createInstances, hcToCreate); err != nil { return fmt.Errorf("failed to create target pool for load balancer (%s): %v", lbRefStr, err) } if hcToCreate != nil { - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Created health checks %v.", lbRefStr, hcToCreate.Name) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Created health checks %v.", lbRefStr, hcToCreate.Name) } if len(hosts) <= maxTargetPoolCreateInstances { - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Created target pool.", lbRefStr) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Created target pool.", lbRefStr) } else { - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Created initial target pool (now updating the remaining %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) - if err := gce.updateTargetPool(loadBalancerName, hosts); err != nil { + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Created initial target pool (now updating the remaining %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) + if err := g.updateTargetPool(loadBalancerName, hosts); err != nil { return fmt.Errorf("failed to update target pool for load balancer (%s): %v", lbRefStr, err) } - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, 
len(hosts)-maxTargetPoolCreateInstances) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) } } else if tpExists { // Ensure hosts are updated even if there is no other changes required on target pool. - if err := gce.updateTargetPool(loadBalancerName, hosts); err != nil { + if err := g.updateTargetPool(loadBalancerName, hosts); err != nil { return fmt.Errorf("failed to update target pool for load balancer (%s): %v", lbRefStr, err) } - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts)) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts)) if hcToCreate != nil { - if hc, err := gce.ensureHttpHealthCheck(hcToCreate.Name, hcToCreate.RequestPath, int32(hcToCreate.Port)); err != nil || hc == nil { + if hc, err := g.ensureHTTPHealthCheck(hcToCreate.Name, hcToCreate.RequestPath, int32(hcToCreate.Port)); err != nil || hc == nil { return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", loadBalancerName, hcToCreate.Port, hcToCreate.RequestPath, err) } } } else { // Panic worthy. - glog.Errorf("ensureTargetPoolAndHealthCheck(%s): target pool not exists and doesn't need to be created.", lbRefStr) + klog.Errorf("ensureTargetPoolAndHealthCheck(%s): target pool not exists and doesn't need to be created.", lbRefStr) } return nil } -func (gce *GCECloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, serviceName, ipAddress, region, clusterID string, hosts []*gceInstance, hc *compute.HttpHealthCheck) error { +func (g *Cloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, serviceName, ipAddress, region, clusterID string, hosts []*gceInstance, hc *compute.HttpHealthCheck) error { // health check management is coupled with targetPools to prevent leaks. A // target pool is the only thing that requires a health check, so we delete // associated checks on teardown, and ensure checks on setup. @@ -527,16 +528,16 @@ func (gce *GCECloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, servi isNodesHealthCheck := hc.Name != name if isNodesHealthCheck { // Lock to prevent necessary nodes health check / firewall gets deleted. 
- gce.sharedResourceLock.Lock() - defer gce.sharedResourceLock.Unlock() + g.sharedResourceLock.Lock() + defer g.sharedResourceLock.Unlock() } - if err := gce.ensureHttpHealthCheckFirewall(svc, serviceName, ipAddress, region, clusterID, hosts, hc.Name, int32(hc.Port), isNodesHealthCheck); err != nil { + if err := g.ensureHTTPHealthCheckFirewall(svc, serviceName, ipAddress, region, clusterID, hosts, hc.Name, int32(hc.Port), isNodesHealthCheck); err != nil { return err } var err error hcRequestPath, hcPort := hc.RequestPath, hc.Port - if hc, err = gce.ensureHttpHealthCheck(hc.Name, hc.RequestPath, int32(hc.Port)); err != nil || hc == nil { + if hc, err = g.ensureHTTPHealthCheck(hc.Name, hc.RequestPath, int32(hc.Port)); err != nil || hc == nil { return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", name, hcPort, hcRequestPath, err) } hcLinks = append(hcLinks, hc.SelfLink) @@ -546,7 +547,7 @@ func (gce *GCECloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, servi for _, host := range hosts { instances = append(instances, host.makeComparableHostPath()) } - glog.Infof("Creating targetpool %v with %d healthchecks", name, len(hcLinks)) + klog.Infof("Creating targetpool %v with %d healthchecks", name, len(hcLinks)) pool := &compute.TargetPool{ Name: name, Description: fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, serviceName), @@ -555,14 +556,14 @@ func (gce *GCECloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, servi HealthChecks: hcLinks, } - if err := gce.CreateTargetPool(pool, region); err != nil && !isHTTPErrorCode(err, http.StatusConflict) { + if err := g.CreateTargetPool(pool, region); err != nil && !isHTTPErrorCode(err, http.StatusConflict) { return err } return nil } -func (gce *GCECloud) updateTargetPool(loadBalancerName string, hosts []*gceInstance) error { - pool, err := gce.GetTargetPool(loadBalancerName, gce.region) +func (g *Cloud) updateTargetPool(loadBalancerName string, hosts []*gceInstance) error { + pool, err := g.GetTargetPool(loadBalancerName, g.region) if err != nil { return err } @@ -585,13 +586,13 @@ func (gce *GCECloud) updateTargetPool(loadBalancerName string, hosts []*gceInsta } if len(toAdd) > 0 { - if err := gce.AddInstancesToTargetPool(loadBalancerName, gce.region, toAdd); err != nil { + if err := g.AddInstancesToTargetPool(loadBalancerName, g.region, toAdd); err != nil { return err } } if len(toRemove) > 0 { - if err := gce.RemoveInstancesFromTargetPool(loadBalancerName, gce.region, toRemove); err != nil { + if err := g.RemoveInstancesFromTargetPool(loadBalancerName, g.region, toRemove); err != nil { return err } } @@ -599,23 +600,23 @@ func (gce *GCECloud) updateTargetPool(loadBalancerName string, hosts []*gceInsta // Try to verify that the correct number of nodes are now in the target pool. // We've been bitten by a bug here before (#11327) where all nodes were // accidentally removed and want to make similar problems easier to notice. - updatedPool, err := gce.GetTargetPool(loadBalancerName, gce.region) + updatedPool, err := g.GetTargetPool(loadBalancerName, g.region) if err != nil { return err } if len(updatedPool.Instances) != len(hosts) { - glog.Errorf("Unexpected number of instances (%d) in target pool %s after updating (expected %d). Instances in updated pool: %s", + klog.Errorf("Unexpected number of instances (%d) in target pool %s after updating (expected %d). 
Instances in updated pool: %s", len(updatedPool.Instances), loadBalancerName, len(hosts), strings.Join(updatedPool.Instances, ",")) return fmt.Errorf("Unexpected number of instances (%d) in target pool %s after update (expected %d)", len(updatedPool.Instances), loadBalancerName, len(hosts)) } return nil } -func (gce *GCECloud) targetPoolURL(name string) string { - return gce.service.BasePath + strings.Join([]string{gce.projectID, "regions", gce.region, "targetPools", name}, "/") +func (g *Cloud) targetPoolURL(name string) string { + return g.service.BasePath + strings.Join([]string{g.projectID, "regions", g.region, "targetPools", name}, "/") } -func makeHttpHealthCheck(name, path string, port int32) *compute.HttpHealthCheck { +func makeHTTPHealthCheck(name, path string, port int32) *compute.HttpHealthCheck { return &compute.HttpHealthCheck{ Name: name, Port: int64(port), @@ -629,13 +630,13 @@ func makeHttpHealthCheck(name, path string, port int32) *compute.HttpHealthCheck } } -// mergeHttpHealthChecks reconciles HttpHealthCheck configures to be no smaller +// mergeHTTPHealthChecks reconciles HttpHealthCheck configures to be no smaller // than the default values. // E.g. old health check interval is 2s, new default is 8. // The HC interval will be reconciled to 8 seconds. // If the existing health check is larger than the default interval, // the configuration will be kept. -func mergeHttpHealthChecks(hc, newHC *compute.HttpHealthCheck) *compute.HttpHealthCheck { +func mergeHTTPHealthChecks(hc, newHC *compute.HttpHealthCheck) *compute.HttpHealthCheck { if hc.CheckIntervalSec > newHC.CheckIntervalSec { newHC.CheckIntervalSec = hc.CheckIntervalSec } @@ -651,42 +652,42 @@ func mergeHttpHealthChecks(hc, newHC *compute.HttpHealthCheck) *compute.HttpHeal return newHC } -// needToUpdateHttpHealthChecks checks whether the http healthcheck needs to be +// needToUpdateHTTPHealthChecks checks whether the http healthcheck needs to be // updated. 
-func needToUpdateHttpHealthChecks(hc, newHC *compute.HttpHealthCheck) bool { +func needToUpdateHTTPHealthChecks(hc, newHC *compute.HttpHealthCheck) bool { changed := hc.Port != newHC.Port || hc.RequestPath != newHC.RequestPath || hc.Description != newHC.Description changed = changed || hc.CheckIntervalSec < newHC.CheckIntervalSec || hc.TimeoutSec < newHC.TimeoutSec changed = changed || hc.UnhealthyThreshold < newHC.UnhealthyThreshold || hc.HealthyThreshold < newHC.HealthyThreshold return changed } -func (gce *GCECloud) ensureHttpHealthCheck(name, path string, port int32) (hc *compute.HttpHealthCheck, err error) { - newHC := makeHttpHealthCheck(name, path, port) - hc, err = gce.GetHttpHealthCheck(name) +func (g *Cloud) ensureHTTPHealthCheck(name, path string, port int32) (hc *compute.HttpHealthCheck, err error) { + newHC := makeHTTPHealthCheck(name, path, port) + hc, err = g.GetHTTPHealthCheck(name) if hc == nil || err != nil && isHTTPErrorCode(err, http.StatusNotFound) { - glog.Infof("Did not find health check %v, creating port %v path %v", name, port, path) - if err = gce.CreateHttpHealthCheck(newHC); err != nil { + klog.Infof("Did not find health check %v, creating port %v path %v", name, port, path) + if err = g.CreateHTTPHealthCheck(newHC); err != nil { return nil, err } - hc, err = gce.GetHttpHealthCheck(name) + hc, err = g.GetHTTPHealthCheck(name) if err != nil { - glog.Errorf("Failed to get http health check %v", err) + klog.Errorf("Failed to get http health check %v", err) return nil, err } - glog.Infof("Created HTTP health check %v healthCheckNodePort: %d", name, port) + klog.Infof("Created HTTP health check %v healthCheckNodePort: %d", name, port) return hc, nil } // Validate health check fields - glog.V(4).Infof("Checking http health check params %s", name) - if needToUpdateHttpHealthChecks(hc, newHC) { - glog.Warningf("Health check %v exists but parameters have drifted - updating...", name) - newHC = mergeHttpHealthChecks(hc, newHC) - if err := gce.UpdateHttpHealthCheck(newHC); err != nil { - glog.Warningf("Failed to reconcile http health check %v parameters", name) + klog.V(4).Infof("Checking http health check params %s", name) + if needToUpdateHTTPHealthChecks(hc, newHC) { + klog.Warningf("Health check %v exists but parameters have drifted - updating...", name) + newHC = mergeHTTPHealthChecks(hc, newHC) + if err := g.UpdateHTTPHealthCheck(newHC); err != nil { + klog.Warningf("Failed to reconcile http health check %v parameters", name) return nil, err } - glog.V(4).Infof("Corrected health check %v parameters successful", name) - hc, err = gce.GetHttpHealthCheck(name) + klog.V(4).Infof("Corrected health check %v parameters successful", name) + hc, err = g.GetHTTPHealthCheck(name) if err != nil { return nil, err } @@ -698,14 +699,14 @@ func (gce *GCECloud) ensureHttpHealthCheck(name, path string, port int32) (hc *c // IP is being requested. // Returns whether the forwarding rule exists, whether it needs to be updated, // what its IP address is (if it exists), and any error we encountered. 
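The HTTP health-check reconciliation never lowers timing values an operator may have raised: mergeHTTPHealthChecks keeps the larger of the existing and default interval, timeout and thresholds, while needToUpdateHTTPHealthChecks only reports drift when identity fields change or the existing values fall below the defaults. The standalone sketch below restates both rules against the same compute.HttpHealthCheck fields used in the diff; the merge and needsUpdate names are local stand-ins.

```go
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// merge reproduces the reconcile rule: never lower interval, timeout or
// thresholds below what already exists; identity fields come from newHC.
func merge(existing, newHC *compute.HttpHealthCheck) *compute.HttpHealthCheck {
	if existing.CheckIntervalSec > newHC.CheckIntervalSec {
		newHC.CheckIntervalSec = existing.CheckIntervalSec
	}
	if existing.TimeoutSec > newHC.TimeoutSec {
		newHC.TimeoutSec = existing.TimeoutSec
	}
	if existing.UnhealthyThreshold > newHC.UnhealthyThreshold {
		newHC.UnhealthyThreshold = existing.UnhealthyThreshold
	}
	if existing.HealthyThreshold > newHC.HealthyThreshold {
		newHC.HealthyThreshold = existing.HealthyThreshold
	}
	return newHC
}

// needsUpdate flags drift: identity fields must match exactly, timing and
// threshold fields only need to be at least the defaults.
func needsUpdate(existing, newHC *compute.HttpHealthCheck) bool {
	changed := existing.Port != newHC.Port || existing.RequestPath != newHC.RequestPath || existing.Description != newHC.Description
	changed = changed || existing.CheckIntervalSec < newHC.CheckIntervalSec || existing.TimeoutSec < newHC.TimeoutSec
	changed = changed || existing.UnhealthyThreshold < newHC.UnhealthyThreshold || existing.HealthyThreshold < newHC.HealthyThreshold
	return changed
}

func main() {
	existing := &compute.HttpHealthCheck{Port: 30000, RequestPath: "/healthz", CheckIntervalSec: 30, TimeoutSec: 5, UnhealthyThreshold: 5, HealthyThreshold: 1}
	defaults := &compute.HttpHealthCheck{Port: 30000, RequestPath: "/healthz", CheckIntervalSec: 8, TimeoutSec: 1, UnhealthyThreshold: 3, HealthyThreshold: 1}
	fmt.Println("needs update:", needsUpdate(existing, defaults)) // false: existing values are >= defaults
	fmt.Println("merged interval:", merge(existing, defaults).CheckIntervalSec)
}
```

Keeping the larger values means a manually tuned check survives the controller's reconcile loop, while a check below the defaults is raised back up on the next update.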
-func (gce *GCECloud) forwardingRuleNeedsUpdate(name, region string, loadBalancerIP string, ports []v1.ServicePort) (exists bool, needsUpdate bool, ipAddress string, err error) { - fwd, err := gce.GetRegionForwardingRule(name, region) +func (g *Cloud) forwardingRuleNeedsUpdate(name, region string, loadBalancerIP string, ports []v1.ServicePort) (exists bool, needsUpdate bool, ipAddress string, err error) { + fwd, err := g.GetRegionForwardingRule(name, region) if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { return false, true, "", nil } // Err on the side of caution in case of errors. Caller should notice the error and retry. - // We never want to end up recreating resources because gce api flaked. + // We never want to end up recreating resources because g api flaked. return true, false, "", fmt.Errorf("error getting load balancer's forwarding rule: %v", err) } // If the user asks for a specific static ip through the Service spec, @@ -713,22 +714,22 @@ func (gce *GCECloud) forwardingRuleNeedsUpdate(name, region string, loadBalancer // TODO: we report loadbalancer IP through status, so we want to verify if // that matches the forwarding rule as well. if loadBalancerIP != "" && loadBalancerIP != fwd.IPAddress { - glog.Infof("LoadBalancer ip for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPAddress, loadBalancerIP) + klog.Infof("LoadBalancer ip for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPAddress, loadBalancerIP) return true, true, fwd.IPAddress, nil } portRange, err := loadBalancerPortRange(ports) if err != nil { // Err on the side of caution in case of errors. Caller should notice the error and retry. - // We never want to end up recreating resources because gce api flaked. + // We never want to end up recreating resources because g api flaked. return true, false, "", err } if portRange != fwd.PortRange { - glog.Infof("LoadBalancer port range for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.PortRange, portRange) + klog.Infof("LoadBalancer port range for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.PortRange, portRange) return true, true, fwd.IPAddress, nil } // The service controller verified all the protocols match on the ports, just check the first one if string(ports[0].Protocol) != fwd.IPProtocol { - glog.Infof("LoadBalancer protocol for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPProtocol, string(ports[0].Protocol)) + klog.Infof("LoadBalancer protocol for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPProtocol, string(ports[0].Protocol)) return true, true, fwd.IPAddress, nil } @@ -737,14 +738,14 @@ func (gce *GCECloud) forwardingRuleNeedsUpdate(name, region string, loadBalancer // Doesn't check whether the hosts have changed, since host updating is handled // separately. -func (gce *GCECloud) targetPoolNeedsRecreation(name, region string, affinityType v1.ServiceAffinity) (exists bool, needsRecreation bool, err error) { - tp, err := gce.GetTargetPool(name, region) +func (g *Cloud) targetPoolNeedsRecreation(name, region string, affinityType v1.ServiceAffinity) (exists bool, needsRecreation bool, err error) { + tp, err := g.GetTargetPool(name, region) if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { return false, true, nil } // Err on the side of caution in case of errors. Caller should notice the error and retry. 
- // We never want to end up recreating resources because gce api flaked. + // We never want to end up recreating resources because g api flaked. return true, false, fmt.Errorf("error getting load balancer's target pool: %v", err) } // TODO: If the user modifies their Service's session affinity, it *should* @@ -756,7 +757,7 @@ func (gce *GCECloud) targetPoolNeedsRecreation(name, region string, affinityType // target pool (which results in downtime). Fix this when we have formally // defined the defaults on either side. if tp.SessionAffinity != "" && translateAffinityType(affinityType) != tp.SessionAffinity { - glog.Infof("LoadBalancer target pool %v changed affinity from %v to %v", name, tp.SessionAffinity, affinityType) + klog.Infof("LoadBalancer target pool %v changed affinity from %v to %v", name, tp.SessionAffinity, affinityType) return true, true, nil } return true, false, nil @@ -813,13 +814,13 @@ func translateAffinityType(affinityType v1.ServiceAffinity) string { case v1.ServiceAffinityNone: return gceAffinityTypeNone default: - glog.Errorf("Unexpected affinity type: %v", affinityType) + klog.Errorf("Unexpected affinity type: %v", affinityType) return gceAffinityTypeNone } } -func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress string, ports []v1.ServicePort, sourceRanges netsets.IPNet) (exists bool, needsUpdate bool, err error) { - fw, err := gce.GetFirewall(MakeFirewallName(name)) +func (g *Cloud) firewallNeedsUpdate(name, serviceName, region, ipAddress string, ports []v1.ServicePort, sourceRanges netsets.IPNet) (exists bool, needsUpdate bool, err error) { + fw, err := g.GetFirewall(MakeFirewallName(name)) if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { return false, true, nil @@ -845,7 +846,7 @@ func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress st actualSourceRanges, err := netsets.ParseIPNets(fw.SourceRanges...) if err != nil { // This really shouldn't happen... GCE has returned something unexpected - glog.Warningf("Error parsing firewall SourceRanges: %v", fw.SourceRanges) + klog.Warningf("Error parsing firewall SourceRanges: %v", fw.SourceRanges) // We don't return the error, because we can hopefully recover from this by reconfiguring the firewall return true, true, nil } @@ -856,7 +857,7 @@ func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress st return true, false, nil } -func (gce *GCECloud) ensureHttpHealthCheckFirewall(svc *v1.Service, serviceName, ipAddress, region, clusterID string, hosts []*gceInstance, hcName string, hcPort int32, isNodesHealthCheck bool) error { +func (g *Cloud) ensureHTTPHealthCheckFirewall(svc *v1.Service, serviceName, ipAddress, region, clusterID string, hosts []*gceInstance, hcName string, hcPort int32, isNodesHealthCheck bool) error { // Prepare the firewall params for creating / checking. 
desc := fmt.Sprintf(`{"kubernetes.io/cluster-id":"%s"}`, clusterID) if !isNodesHealthCheck { @@ -866,16 +867,16 @@ func (gce *GCECloud) ensureHttpHealthCheckFirewall(svc *v1.Service, serviceName, ports := []v1.ServicePort{{Protocol: "tcp", Port: hcPort}} fwName := MakeHealthCheckFirewallName(clusterID, hcName, isNodesHealthCheck) - fw, err := gce.GetFirewall(fwName) + fw, err := g.GetFirewall(fwName) if err != nil { if !isHTTPErrorCode(err, http.StatusNotFound) { return fmt.Errorf("error getting firewall for health checks: %v", err) } - glog.Infof("Creating firewall %v for health checks.", fwName) - if err := gce.createFirewall(svc, fwName, region, desc, sourceRanges, ports, hosts); err != nil { + klog.Infof("Creating firewall %v for health checks.", fwName) + if err := g.createFirewall(svc, fwName, region, desc, sourceRanges, ports, hosts); err != nil { return err } - glog.Infof("Created firewall %v for health checks.", fwName) + klog.Infof("Created firewall %v for health checks.", fwName) return nil } // Validate firewall fields. @@ -884,12 +885,12 @@ func (gce *GCECloud) ensureHttpHealthCheckFirewall(svc *v1.Service, serviceName, fw.Allowed[0].IPProtocol != string(ports[0].Protocol) || !equalStringSets(fw.Allowed[0].Ports, []string{strconv.Itoa(int(ports[0].Port))}) || !equalStringSets(fw.SourceRanges, sourceRanges.StringSlice()) { - glog.Warningf("Firewall %v exists but parameters have drifted - updating...", fwName) - if err := gce.updateFirewall(svc, fwName, region, desc, sourceRanges, ports, hosts); err != nil { - glog.Warningf("Failed to reconcile firewall %v parameters.", fwName) + klog.Warningf("Firewall %v exists but parameters have drifted - updating...", fwName) + if err := g.updateFirewall(svc, fwName, region, desc, sourceRanges, ports, hosts); err != nil { + klog.Warningf("Failed to reconcile firewall %v parameters.", fwName) return err } - glog.V(4).Infof("Corrected firewall %v parameters successful", fwName) + klog.V(4).Infof("Corrected firewall %v parameters successful", fwName) } return nil } @@ -933,17 +934,17 @@ func createForwardingRule(s CloudForwardingRuleService, name, serviceName, regio return nil } -func (gce *GCECloud) createFirewall(svc *v1.Service, name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) error { - firewall, err := gce.firewallObject(name, region, desc, sourceRanges, ports, hosts) +func (g *Cloud) createFirewall(svc *v1.Service, name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) error { + firewall, err := g.firewallObject(name, region, desc, sourceRanges, ports, hosts) if err != nil { return err } - if err = gce.CreateFirewall(firewall); err != nil { + if err = g.CreateFirewall(firewall); err != nil { if isHTTPErrorCode(err, http.StatusConflict) { return nil - } else if isForbidden(err) && gce.OnXPN() { - glog.V(4).Infof("createFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", firewall.Name) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(firewall, gce.NetworkProjectID())) + } else if isForbidden(err) && g.OnXPN() { + klog.V(4).Infof("createFirewall(%v): do not have permission to create firewall rule (on XPN). 
Raising event.", firewall.Name) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(firewall, g.NetworkProjectID())) return nil } return err @@ -951,18 +952,18 @@ func (gce *GCECloud) createFirewall(svc *v1.Service, name, region, desc string, return nil } -func (gce *GCECloud) updateFirewall(svc *v1.Service, name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) error { - firewall, err := gce.firewallObject(name, region, desc, sourceRanges, ports, hosts) +func (g *Cloud) updateFirewall(svc *v1.Service, name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) error { + firewall, err := g.firewallObject(name, region, desc, sourceRanges, ports, hosts) if err != nil { return err } - if err = gce.UpdateFirewall(firewall); err != nil { + if err = g.UpdateFirewall(firewall); err != nil { if isHTTPErrorCode(err, http.StatusConflict) { return nil - } else if isForbidden(err) && gce.OnXPN() { - glog.V(4).Infof("updateFirewall(%v): do not have permission to update firewall rule (on XPN). Raising event.", firewall.Name) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(firewall, gce.NetworkProjectID())) + } else if isForbidden(err) && g.OnXPN() { + klog.V(4).Infof("updateFirewall(%v): do not have permission to update firewall rule (on XPN). Raising event.", firewall.Name) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(firewall, g.NetworkProjectID())) return nil } return err @@ -970,25 +971,25 @@ func (gce *GCECloud) updateFirewall(svc *v1.Service, name, region, desc string, return nil } -func (gce *GCECloud) firewallObject(name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) (*compute.Firewall, error) { +func (g *Cloud) firewallObject(name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) (*compute.Firewall, error) { allowedPorts := make([]string, len(ports)) for ix := range ports { allowedPorts[ix] = strconv.Itoa(int(ports[ix].Port)) } // If the node tags to be used for this cluster have been predefined in the // provider config, just use them. Otherwise, invoke computeHostTags method to get the tags. - hostTags := gce.nodeTags + hostTags := g.nodeTags if len(hostTags) == 0 { var err error - if hostTags, err = gce.computeHostTags(hosts); err != nil { - return nil, fmt.Errorf("No node tags supplied and also failed to parse the given lists of hosts for tags. Abort creating firewall rule.") + if hostTags, err = g.computeHostTags(hosts); err != nil { + return nil, fmt.Errorf("no node tags supplied and also failed to parse the given lists of hosts for tags. 
Abort creating firewall rule") } } firewall := &compute.Firewall{ Name: name, Description: desc, - Network: gce.networkURL, + Network: g.networkURL, SourceRanges: sourceRanges.StringSlice(), TargetTags: hostTags, Allowed: []*compute.FirewallAllowed{ @@ -1054,8 +1055,8 @@ func ensureStaticIP(s CloudAddressService, name, serviceName, region, existingIP return addr.Address, existed, nil } -func (gce *GCECloud) getServiceNetworkTier(svc *v1.Service) (cloud.NetworkTier, error) { - if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { +func (g *Cloud) getServiceNetworkTier(svc *v1.Service) (cloud.NetworkTier, error) { + if !g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { return cloud.NetworkTierDefault, nil } tier, err := GetServiceNetworkTier(svc) @@ -1066,12 +1067,12 @@ func (gce *GCECloud) getServiceNetworkTier(svc *v1.Service) (cloud.NetworkTier, return tier, nil } -func (gce *GCECloud) deleteWrongNetworkTieredResources(lbName, lbRef string, desiredNetTier cloud.NetworkTier) error { +func (g *Cloud) deleteWrongNetworkTieredResources(lbName, lbRef string, desiredNetTier cloud.NetworkTier) error { logPrefix := fmt.Sprintf("deleteWrongNetworkTieredResources:(%s)", lbRef) - if err := deleteFWDRuleWithWrongTier(gce, gce.region, lbName, logPrefix, desiredNetTier); err != nil { + if err := deleteFWDRuleWithWrongTier(g, g.region, lbName, logPrefix, desiredNetTier); err != nil { return err } - if err := deleteAddressWithWrongTier(gce, gce.region, lbName, logPrefix, desiredNetTier); err != nil { + if err := deleteAddressWithWrongTier(g, g.region, lbName, logPrefix, desiredNetTier); err != nil { return err } return nil @@ -1090,7 +1091,7 @@ func deleteFWDRuleWithWrongTier(s CloudForwardingRuleService, region, name, logP if existingTier == desiredNetTier { return nil } - glog.V(2).Infof("%s: Network tiers do not match; existing forwarding rule: %q, desired: %q. Deleting the forwarding rule", + klog.V(2).Infof("%s: Network tiers do not match; existing forwarding rule: %q, desired: %q. Deleting the forwarding rule", logPrefix, existingTier, desiredNetTier) err = s.DeleteRegionForwardingRule(name, region) return ignoreNotFound(err) @@ -1118,7 +1119,7 @@ func deleteAddressWithWrongTier(s CloudAddressService, region, name, logPrefix s if existingTier == desiredNetTier { return nil } - glog.V(2).Infof("%s: Network tiers do not match; existing address: %q, desired: %q. Deleting the address", + klog.V(2).Infof("%s: Network tiers do not match; existing address: %q, desired: %q. 
Deleting the address", logPrefix, existingTier, desiredNetTier) err = s.DeleteRegionAddress(name, region) return ignoreNotFound(err) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go index 3bc83710b8670..87d1be64b49e6 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go @@ -22,11 +22,11 @@ import ( "strconv" "strings" - "github.com/golang/glog" compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" v1_service "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" ) @@ -35,21 +35,21 @@ const ( allInstances = "ALL" ) -func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { +func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace} ports, protocol := getPortsAndProtocol(svc.Spec.Ports) if protocol != v1.ProtocolTCP && protocol != v1.ProtocolUDP { return nil, fmt.Errorf("Invalid protocol %s, only TCP and UDP are supported", string(protocol)) } scheme := cloud.SchemeInternal - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, svc) + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc) sharedBackend := shareBackendService(svc) backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity) - backendServiceLink := gce.getBackendServiceLink(backendServiceName) + backendServiceLink := g.getBackendServiceLink(backendServiceName) // Ensure instance groups exist and nodes are assigned to groups igName := makeInstanceGroupName(clusterID) - igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes) + igLinks, err := g.ensureInternalInstanceGroups(igName, nodes) if err != nil { return nil, err } @@ -58,14 +58,14 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s var existingBackendService *compute.BackendService if existingFwdRule != nil && existingFwdRule.BackendService != "" { existingBSName := getNameFromLink(existingFwdRule.BackendService) - if existingBackendService, err = gce.GetRegionBackendService(existingBSName, gce.region); err != nil && !isNotFound(err) { + if existingBackendService, err = g.GetRegionBackendService(existingBSName, g.region); err != nil && !isNotFound(err) { return nil, err } } // Lock the sharedResourceLock to prevent any deletions of shared resources while assembling shared resources here - gce.sharedResourceLock.Lock() - defer gce.sharedResourceLock.Unlock() + g.sharedResourceLock.Lock() + defer g.sharedResourceLock.Unlock() // Ensure health check exists before creating the backend service. The health check is shared // if externalTrafficPolicy=Cluster. 
@@ -76,7 +76,7 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s // Service requires a special health check, retrieve the OnlyLocal port & path hcPath, hcPort = v1_service.GetServiceHealthCheckPathPort(svc) } - hc, err := gce.ensureInternalHealthCheck(hcName, nm, sharedHealthCheck, hcPath, hcPort) + hc, err := g.ensureInternalHealthCheck(hcName, nm, sharedHealthCheck, hcPath, hcPort) if err != nil { return nil, err } @@ -88,7 +88,7 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s // If the ILB already exists, continue using the subnet that it's already using. // This is to support existing ILBs that were setup using the wrong subnet. - subnetworkURL := gce.SubnetworkURL() + subnetworkURL := g.SubnetworkURL() if existingFwdRule != nil && existingFwdRule.Subnetwork != "" { // external LBs have an empty Subnetwork field. subnetworkURL = existingFwdRule.Subnetwork @@ -96,17 +96,17 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s var addrMgr *addressManager // If the network is not a legacy network, use the address manager - if !gce.IsLegacyNetwork() { - addrMgr = newAddressManager(gce, nm.String(), gce.Region(), subnetworkURL, loadBalancerName, requestedIP, cloud.SchemeInternal) + if !g.IsLegacyNetwork() { + addrMgr = newAddressManager(g, nm.String(), g.Region(), subnetworkURL, loadBalancerName, requestedIP, cloud.SchemeInternal) ipToUse, err = addrMgr.HoldAddress() if err != nil { return nil, err } - glog.V(2).Infof("ensureInternalLoadBalancer(%v): reserved IP %q for the forwarding rule", loadBalancerName, ipToUse) + klog.V(2).Infof("ensureInternalLoadBalancer(%v): reserved IP %q for the forwarding rule", loadBalancerName, ipToUse) } // Ensure firewall rules if necessary - if err = gce.ensureInternalFirewalls(loadBalancerName, ipToUse, clusterID, nm, svc, strconv.Itoa(int(hcPort)), sharedHealthCheck, nodes); err != nil { + if err = g.ensureInternalFirewalls(loadBalancerName, ipToUse, clusterID, nm, svc, strconv.Itoa(int(hcPort)), sharedHealthCheck, nodes); err != nil { return nil, err } @@ -125,47 +125,47 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s if subnetworkURL != "" { expectedFwdRule.Subnetwork = subnetworkURL } else { - expectedFwdRule.Network = gce.networkURL + expectedFwdRule.Network = g.networkURL } fwdRuleDeleted := false if existingFwdRule != nil && !fwdRuleEqual(existingFwdRule, expectedFwdRule) { - glog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", loadBalancerName, existingFwdRule.IPAddress) - if err = ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil { + klog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", loadBalancerName, existingFwdRule.IPAddress) + if err = ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil { return nil, err } fwdRuleDeleted = true } bsDescription := makeBackendServiceDescription(nm, sharedBackend) - err = gce.ensureInternalBackendService(backendServiceName, bsDescription, svc.Spec.SessionAffinity, scheme, protocol, igLinks, hc.SelfLink) + err = g.ensureInternalBackendService(backendServiceName, bsDescription, svc.Spec.SessionAffinity, scheme, protocol, igLinks, hc.SelfLink) if err != nil { return nil, err } // If we previously deleted the forwarding rule or it never existed, finally create it. 
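Note on the firewall shape: the firewallObject hunk earlier in this section and the ensureInternalFirewall hunk further down both assemble a *compute.Firewall with the target network, source ranges, node target tags, and a single Allowed entry whose Ports are service ports rendered as strings. A compilable sketch of that shape, with hypothetical values standing in for the provider's derived ones:

package main

import (
	"fmt"
	"strconv"

	compute "google.golang.org/api/compute/v1"
)

// buildFirewall mirrors the structure produced by firewallObject/ensureInternalFirewall:
// one Allowed entry listing the service ports as strings.
func buildFirewall(name, desc, networkURL string, sourceRanges, targetTags []string, protocol string, ports []int32) *compute.Firewall {
	allowedPorts := make([]string, len(ports))
	for i, p := range ports {
		allowedPorts[i] = strconv.Itoa(int(p))
	}
	return &compute.Firewall{
		Name:         name,
		Description:  desc,
		Network:      networkURL,
		SourceRanges: sourceRanges,
		TargetTags:   targetTags,
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: protocol, // "tcp" or "udp"
				Ports:      allowedPorts,
			},
		},
	}
}

func main() {
	// All values below are hypothetical placeholders.
	fw := buildFirewall(
		"a1b2c3d4-example",
		`{"kubernetes.io/service-name":"default/example"}`,
		"https://www.googleapis.com/compute/v1/projects/my-project/global/networks/default",
		[]string{"10.0.0.0/8"},
		[]string{"my-cluster-node"},
		"tcp",
		[]int32{80, 443},
	)
	fmt.Println(fw.Name, fw.Allowed[0].Ports)
}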
if fwdRuleDeleted || existingFwdRule == nil { - glog.V(2).Infof("ensureInternalLoadBalancer(%v): creating forwarding rule", loadBalancerName) - if err = gce.CreateRegionForwardingRule(expectedFwdRule, gce.region); err != nil { + klog.V(2).Infof("ensureInternalLoadBalancer(%v): creating forwarding rule", loadBalancerName) + if err = g.CreateRegionForwardingRule(expectedFwdRule, g.region); err != nil { return nil, err } - glog.V(2).Infof("ensureInternalLoadBalancer(%v): created forwarding rule", loadBalancerName) + klog.V(2).Infof("ensureInternalLoadBalancer(%v): created forwarding rule", loadBalancerName) } // Delete the previous internal load balancer resources if necessary if existingBackendService != nil { - gce.clearPreviousInternalResources(svc, loadBalancerName, existingBackendService, backendServiceName, hcName) + g.clearPreviousInternalResources(svc, loadBalancerName, existingBackendService, backendServiceName, hcName) } if addrMgr != nil { // Now that the controller knows the forwarding rule exists, we can release the address. if err := addrMgr.ReleaseAddress(); err != nil { - glog.Errorf("ensureInternalLoadBalancer: failed to release address reservation, possibly causing an orphan: %v", err) + klog.Errorf("ensureInternalLoadBalancer: failed to release address reservation, possibly causing an orphan: %v", err) } } // Get the most recent forwarding rule for the address. - updatedFwdRule, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region) + updatedFwdRule, err := g.GetRegionForwardingRule(loadBalancerName, g.region) if err != nil { return nil, err } @@ -175,12 +175,12 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s return status, nil } -func (gce *GCECloud) clearPreviousInternalResources(svc *v1.Service, loadBalancerName string, existingBackendService *compute.BackendService, expectedBSName, expectedHCName string) { +func (g *Cloud) clearPreviousInternalResources(svc *v1.Service, loadBalancerName string, existingBackendService *compute.BackendService, expectedBSName, expectedHCName string) { // If a new backend service was created, delete the old one. 
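Note on the address lifecycle: the ensureInternalLoadBalancer flow above reserves an IP through an addressManager before the forwarding rule is created and releases the reservation once the rule exists, since the rule itself then holds the IP. The addressManager type is not shown in this diff, so the sketch below uses a hypothetical interface purely to illustrate that hold-create-release lifecycle.

package main

import (
	"fmt"
	"log"
)

// addressHolder is a hypothetical stand-in for the provider's addressManager;
// only the lifecycle (HoldAddress then ReleaseAddress) mirrors the diff above.
type addressHolder interface {
	HoldAddress() (string, error)
	ReleaseAddress() error
}

func ensureRuleWithReservedIP(mgr addressHolder, createRule func(ip string) error) error {
	ip, err := mgr.HoldAddress()
	if err != nil {
		return err
	}
	if err := createRule(ip); err != nil {
		return err
	}
	// Once the forwarding rule exists it owns the IP, so the temporary
	// reservation can be released; a failure here only risks an orphaned address.
	if err := mgr.ReleaseAddress(); err != nil {
		log.Printf("failed to release address reservation, possibly causing an orphan: %v", err)
	}
	return nil
}

type fakeHolder struct{ ip string }

func (f *fakeHolder) HoldAddress() (string, error) { return f.ip, nil }
func (f *fakeHolder) ReleaseAddress() error        { return nil }

func main() {
	err := ensureRuleWithReservedIP(&fakeHolder{ip: "10.0.0.10"}, func(ip string) error {
		fmt.Println("creating forwarding rule with IP", ip)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}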
if existingBackendService.Name != expectedBSName { - glog.V(2).Infof("clearPreviousInternalResources(%v): expected backend service %q does not match previous %q - deleting backend service", loadBalancerName, expectedBSName, existingBackendService.Name) - if err := gce.teardownInternalBackendService(existingBackendService.Name); err != nil && !isNotFound(err) { - glog.Warningf("clearPreviousInternalResources: could not delete old backend service: %v, err: %v", existingBackendService.Name, err) + klog.V(2).Infof("clearPreviousInternalResources(%v): expected backend service %q does not match previous %q - deleting backend service", loadBalancerName, expectedBSName, existingBackendService.Name) + if err := g.teardownInternalBackendService(existingBackendService.Name); err != nil && !isNotFound(err) { + klog.Warningf("clearPreviousInternalResources: could not delete old backend service: %v, err: %v", existingBackendService.Name, err) } } @@ -188,24 +188,24 @@ func (gce *GCECloud) clearPreviousInternalResources(svc *v1.Service, loadBalance if len(existingBackendService.HealthChecks) == 1 { existingHCName := getNameFromLink(existingBackendService.HealthChecks[0]) if existingHCName != expectedHCName { - glog.V(2).Infof("clearPreviousInternalResources(%v): expected health check %q does not match previous %q - deleting health check", loadBalancerName, expectedHCName, existingHCName) - if err := gce.teardownInternalHealthCheckAndFirewall(svc, existingHCName); err != nil { - glog.Warningf("clearPreviousInternalResources: could not delete existing healthcheck: %v, err: %v", existingHCName, err) + klog.V(2).Infof("clearPreviousInternalResources(%v): expected health check %q does not match previous %q - deleting health check", loadBalancerName, expectedHCName, existingHCName) + if err := g.teardownInternalHealthCheckAndFirewall(svc, existingHCName); err != nil { + klog.Warningf("clearPreviousInternalResources: could not delete existing healthcheck: %v, err: %v", existingHCName, err) } } } else if len(existingBackendService.HealthChecks) > 1 { - glog.Warningf("clearPreviousInternalResources(%v): more than one health check on the backend service %v, %v", loadBalancerName, existingBackendService.Name, existingBackendService.HealthChecks) + klog.Warningf("clearPreviousInternalResources(%v): more than one health check on the backend service %v, %v", loadBalancerName, existingBackendService.Name, existingBackendService.HealthChecks) } } // updateInternalLoadBalancer is called when the list of nodes has changed. Therefore, only the instance groups // and possibly the backend service need to be updated. 
-func (gce *GCECloud) updateInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, nodes []*v1.Node) error { - gce.sharedResourceLock.Lock() - defer gce.sharedResourceLock.Unlock() +func (g *Cloud) updateInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, nodes []*v1.Node) error { + g.sharedResourceLock.Lock() + defer g.sharedResourceLock.Unlock() igName := makeInstanceGroupName(clusterID) - igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes) + igLinks, err := g.ensureInternalInstanceGroups(igName, nodes) if err != nil { return err } @@ -213,113 +213,113 @@ func (gce *GCECloud) updateInternalLoadBalancer(clusterName, clusterID string, s // Generate the backend service name _, protocol := getPortsAndProtocol(svc.Spec.Ports) scheme := cloud.SchemeInternal - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, svc) + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc) backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, shareBackendService(svc), scheme, protocol, svc.Spec.SessionAffinity) // Ensure the backend service has the proper backend/instance-group links - return gce.ensureInternalBackendServiceGroups(backendServiceName, igLinks) + return g.ensureInternalBackendServiceGroups(backendServiceName, igLinks) } -func (gce *GCECloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, svc *v1.Service) error { - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, svc) +func (g *Cloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, svc *v1.Service) error { + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc) _, protocol := getPortsAndProtocol(svc.Spec.Ports) scheme := cloud.SchemeInternal sharedBackend := shareBackendService(svc) sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(svc) - gce.sharedResourceLock.Lock() - defer gce.sharedResourceLock.Unlock() + g.sharedResourceLock.Lock() + defer g.sharedResourceLock.Unlock() - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): attempting delete of region internal address", loadBalancerName) - ensureAddressDeleted(gce, loadBalancerName, gce.region) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): attempting delete of region internal address", loadBalancerName) + ensureAddressDeleted(g, loadBalancerName, g.region) - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region internal forwarding rule", loadBalancerName) - if err := ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil { + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region internal forwarding rule", loadBalancerName) + if err := ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil { return err } backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity) - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region backend service %v", loadBalancerName, backendServiceName) - if err := gce.teardownInternalBackendService(backendServiceName); err != nil { + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region backend service %v", loadBalancerName, backendServiceName) + if err := g.teardownInternalBackendService(backendServiceName); err != nil { return err } - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting firewall for traffic", loadBalancerName) - if err := 
ignoreNotFound(gce.DeleteFirewall(loadBalancerName)); err != nil { - if isForbidden(err) && gce.OnXPN() { - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): could not delete traffic firewall on XPN cluster. Raising event.", loadBalancerName) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(loadBalancerName, gce.NetworkProjectID())) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting firewall for traffic", loadBalancerName) + if err := ignoreNotFound(g.DeleteFirewall(loadBalancerName)); err != nil { + if isForbidden(err) && g.OnXPN() { + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): could not delete traffic firewall on XPN cluster. Raising event.", loadBalancerName) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(loadBalancerName, g.NetworkProjectID())) } else { return err } } hcName := makeHealthCheckName(loadBalancerName, clusterID, sharedHealthCheck) - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting health check %v and its firewall", loadBalancerName, hcName) - if err := gce.teardownInternalHealthCheckAndFirewall(svc, hcName); err != nil { + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting health check %v and its firewall", loadBalancerName, hcName) + if err := g.teardownInternalHealthCheckAndFirewall(svc, hcName); err != nil { return err } // Try deleting instance groups - expect ResourceInuse error if needed by other LBs igName := makeInstanceGroupName(clusterID) - if err := gce.ensureInternalInstanceGroupsDeleted(igName); err != nil && !isInUsedByError(err) { + if err := g.ensureInternalInstanceGroupsDeleted(igName); err != nil && !isInUsedByError(err) { return err } return nil } -func (gce *GCECloud) teardownInternalBackendService(bsName string) error { - if err := gce.DeleteRegionBackendService(bsName, gce.region); err != nil { +func (g *Cloud) teardownInternalBackendService(bsName string) error { + if err := g.DeleteRegionBackendService(bsName, g.region); err != nil { if isNotFound(err) { - glog.V(2).Infof("teardownInternalBackendService(%v): backend service already deleted. err: %v", bsName, err) + klog.V(2).Infof("teardownInternalBackendService(%v): backend service already deleted. 
err: %v", bsName, err) return nil } else if isInUsedByError(err) { - glog.V(2).Infof("teardownInternalBackendService(%v): backend service in use.", bsName) + klog.V(2).Infof("teardownInternalBackendService(%v): backend service in use.", bsName) return nil } else { return fmt.Errorf("failed to delete backend service: %v, err: %v", bsName, err) } } - glog.V(2).Infof("teardownInternalBackendService(%v): backend service deleted", bsName) + klog.V(2).Infof("teardownInternalBackendService(%v): backend service deleted", bsName) return nil } -func (gce *GCECloud) teardownInternalHealthCheckAndFirewall(svc *v1.Service, hcName string) error { - if err := gce.DeleteHealthCheck(hcName); err != nil { +func (g *Cloud) teardownInternalHealthCheckAndFirewall(svc *v1.Service, hcName string) error { + if err := g.DeleteHealthCheck(hcName); err != nil { if isNotFound(err) { - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check does not exist.", hcName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check does not exist.", hcName) // Purposely do not early return - double check the firewall does not exist } else if isInUsedByError(err) { - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check in use.", hcName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check in use.", hcName) return nil } else { return fmt.Errorf("failed to delete health check: %v, err: %v", hcName, err) } } - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check deleted", hcName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check deleted", hcName) hcFirewallName := makeHealthCheckFirewallNameFromHC(hcName) - if err := ignoreNotFound(gce.DeleteFirewall(hcFirewallName)); err != nil { - if isForbidden(err) && gce.OnXPN() { - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): could not delete health check traffic firewall on XPN cluster. Raising Event.", hcName) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(hcFirewallName, gce.NetworkProjectID())) + if err := ignoreNotFound(g.DeleteFirewall(hcFirewallName)); err != nil { + if isForbidden(err) && g.OnXPN() { + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): could not delete health check traffic firewall on XPN cluster. 
Raising Event.", hcName) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(hcFirewallName, g.NetworkProjectID())) return nil } return fmt.Errorf("failed to delete health check firewall: %v, err: %v", hcFirewallName, err) } - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check firewall deleted", hcFirewallName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check firewall deleted", hcFirewallName) return nil } -func (gce *GCECloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, sourceRanges []string, ports []string, protocol v1.Protocol, nodes []*v1.Node) error { - glog.V(2).Infof("ensureInternalFirewall(%v): checking existing firewall", fwName) - targetTags, err := gce.GetNodeTags(nodeNames(nodes)) +func (g *Cloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, sourceRanges []string, ports []string, protocol v1.Protocol, nodes []*v1.Node) error { + klog.V(2).Infof("ensureInternalFirewall(%v): checking existing firewall", fwName) + targetTags, err := g.GetNodeTags(nodeNames(nodes)) if err != nil { return err } - existingFirewall, err := gce.GetFirewall(fwName) + existingFirewall, err := g.GetFirewall(fwName) if err != nil && !isNotFound(err) { return err } @@ -327,7 +327,7 @@ func (gce *GCECloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc stri expectedFirewall := &compute.Firewall{ Name: fwName, Description: fwDesc, - Network: gce.networkURL, + Network: g.networkURL, SourceRanges: sourceRanges, TargetTags: targetTags, Allowed: []*compute.FirewallAllowed{ @@ -339,11 +339,11 @@ func (gce *GCECloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc stri } if existingFirewall == nil { - glog.V(2).Infof("ensureInternalFirewall(%v): creating firewall", fwName) - err = gce.CreateFirewall(expectedFirewall) - if err != nil && isForbidden(err) && gce.OnXPN() { - glog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", fwName) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(expectedFirewall, gce.NetworkProjectID())) + klog.V(2).Infof("ensureInternalFirewall(%v): creating firewall", fwName) + err = g.CreateFirewall(expectedFirewall) + if err != nil && isForbidden(err) && g.OnXPN() { + klog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", fwName) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(expectedFirewall, g.NetworkProjectID())) return nil } return err @@ -353,17 +353,17 @@ func (gce *GCECloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc stri return nil } - glog.V(2).Infof("ensureInternalFirewall(%v): updating firewall", fwName) - err = gce.UpdateFirewall(expectedFirewall) - if err != nil && isForbidden(err) && gce.OnXPN() { - glog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to update firewall rule (on XPN). Raising event.", fwName) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(expectedFirewall, gce.NetworkProjectID())) + klog.V(2).Infof("ensureInternalFirewall(%v): updating firewall", fwName) + err = g.UpdateFirewall(expectedFirewall) + if err != nil && isForbidden(err) && g.OnXPN() { + klog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to update firewall rule (on XPN). 
Raising event.", fwName) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(expectedFirewall, g.NetworkProjectID())) return nil } return err } -func (gce *GCECloud) ensureInternalFirewalls(loadBalancerName, ipAddress, clusterID string, nm types.NamespacedName, svc *v1.Service, healthCheckPort string, sharedHealthCheck bool, nodes []*v1.Node) error { +func (g *Cloud) ensureInternalFirewalls(loadBalancerName, ipAddress, clusterID string, nm types.NamespacedName, svc *v1.Service, healthCheckPort string, sharedHealthCheck bool, nodes []*v1.Node) error { // First firewall is for ingress traffic fwDesc := makeFirewallDescription(nm.String(), ipAddress) ports, protocol := getPortsAndProtocol(svc.Spec.Ports) @@ -371,7 +371,7 @@ func (gce *GCECloud) ensureInternalFirewalls(loadBalancerName, ipAddress, cluste if err != nil { return err } - err = gce.ensureInternalFirewall(svc, loadBalancerName, fwDesc, sourceRanges.StringSlice(), ports, protocol, nodes) + err = g.ensureInternalFirewall(svc, loadBalancerName, fwDesc, sourceRanges.StringSlice(), ports, protocol, nodes) if err != nil { return err } @@ -379,41 +379,41 @@ func (gce *GCECloud) ensureInternalFirewalls(loadBalancerName, ipAddress, cluste // Second firewall is for health checking nodes / services fwHCName := makeHealthCheckFirewallName(loadBalancerName, clusterID, sharedHealthCheck) hcSrcRanges := LoadBalancerSrcRanges() - return gce.ensureInternalFirewall(svc, fwHCName, "", hcSrcRanges, []string{healthCheckPort}, v1.ProtocolTCP, nodes) + return g.ensureInternalFirewall(svc, fwHCName, "", hcSrcRanges, []string{healthCheckPort}, v1.ProtocolTCP, nodes) } -func (gce *GCECloud) ensureInternalHealthCheck(name string, svcName types.NamespacedName, shared bool, path string, port int32) (*compute.HealthCheck, error) { - glog.V(2).Infof("ensureInternalHealthCheck(%v, %v, %v): checking existing health check", name, path, port) +func (g *Cloud) ensureInternalHealthCheck(name string, svcName types.NamespacedName, shared bool, path string, port int32) (*compute.HealthCheck, error) { + klog.V(2).Infof("ensureInternalHealthCheck(%v, %v, %v): checking existing health check", name, path, port) expectedHC := newInternalLBHealthCheck(name, svcName, shared, path, port) - hc, err := gce.GetHealthCheck(name) + hc, err := g.GetHealthCheck(name) if err != nil && !isNotFound(err) { return nil, err } if hc == nil { - glog.V(2).Infof("ensureInternalHealthCheck: did not find health check %v, creating one with port %v path %v", name, port, path) - if err = gce.CreateHealthCheck(expectedHC); err != nil { + klog.V(2).Infof("ensureInternalHealthCheck: did not find health check %v, creating one with port %v path %v", name, port, path) + if err = g.CreateHealthCheck(expectedHC); err != nil { return nil, err } - hc, err = gce.GetHealthCheck(name) + hc, err = g.GetHealthCheck(name) if err != nil { - glog.Errorf("Failed to get http health check %v", err) + klog.Errorf("Failed to get http health check %v", err) return nil, err } - glog.V(2).Infof("ensureInternalHealthCheck: created health check %v", name) + klog.V(2).Infof("ensureInternalHealthCheck: created health check %v", name) return hc, nil } if needToUpdateHealthChecks(hc, expectedHC) { - glog.V(2).Infof("ensureInternalHealthCheck: health check %v exists but parameters have drifted - updating...", name) + klog.V(2).Infof("ensureInternalHealthCheck: health check %v exists but parameters have drifted - updating...", name) expectedHC = mergeHealthChecks(hc, expectedHC) - if err := 
gce.UpdateHealthCheck(expectedHC); err != nil { - glog.Warningf("Failed to reconcile http health check %v parameters", name) + if err := g.UpdateHealthCheck(expectedHC); err != nil { + klog.Warningf("Failed to reconcile http health check %v parameters", name) return nil, err } - glog.V(2).Infof("ensureInternalHealthCheck: corrected health check %v parameters successful", name) - hc, err = gce.GetHealthCheck(name) + klog.V(2).Infof("ensureInternalHealthCheck: corrected health check %v parameters successful", name) + hc, err = g.GetHealthCheck(name) if err != nil { return nil, err } @@ -421,9 +421,9 @@ func (gce *GCECloud) ensureInternalHealthCheck(name string, svcName types.Namesp return hc, nil } -func (gce *GCECloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node) (string, error) { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): checking group that it contains %v nodes", name, zone, len(nodes)) - ig, err := gce.GetInstanceGroup(name, zone) +func (g *Cloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node) (string, error) { + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): checking group that it contains %v nodes", name, zone, len(nodes)) + ig, err := g.GetInstanceGroup(name, zone) if err != nil && !isNotFound(err) { return "", err } @@ -435,18 +435,18 @@ func (gce *GCECloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1. gceNodes := sets.NewString() if ig == nil { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): creating instance group", name, zone) + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): creating instance group", name, zone) newIG := &compute.InstanceGroup{Name: name} - if err = gce.CreateInstanceGroup(newIG, zone); err != nil { + if err = g.CreateInstanceGroup(newIG, zone); err != nil { return "", err } - ig, err = gce.GetInstanceGroup(name, zone) + ig, err = g.GetInstanceGroup(name, zone) if err != nil { return "", err } } else { - instances, err := gce.ListInstancesInInstanceGroup(name, zone, allInstances) + instances, err := g.ListInstancesInInstanceGroup(name, zone, allInstances) if err != nil { return "", err } @@ -461,18 +461,18 @@ func (gce *GCECloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1. addNodes := kubeNodes.Difference(gceNodes).List() if len(removeNodes) != 0 { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): removing nodes: %v", name, zone, removeNodes) - instanceRefs := gce.ToInstanceReferences(zone, removeNodes) + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): removing nodes: %v", name, zone, removeNodes) + instanceRefs := g.ToInstanceReferences(zone, removeNodes) // Possible we'll receive 404's here if the instance was deleted before getting to this point. 
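Note on instance-group reconciliation: ensureInternalInstanceGroup above diffs the Kubernetes node set against the instances GCE reports for the group, using the string sets from k8s.io/apimachinery, and then removes or adds only the difference. A small self-contained sketch of that diffing step (node names are made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Hypothetical inputs: nodes known to Kubernetes vs. instances already in the group.
	kubeNodes := sets.NewString("node-a", "node-b", "node-c")
	gceNodes := sets.NewString("node-b", "node-c", "node-d")

	removeNodes := gceNodes.Difference(kubeNodes).List() // in the group but no longer a node
	addNodes := kubeNodes.Difference(gceNodes).List()    // nodes missing from the group

	fmt.Println("remove:", removeNodes) // [node-d]
	fmt.Println("add:", addNodes)       // [node-a]
}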
- if err = gce.RemoveInstancesFromInstanceGroup(name, zone, instanceRefs); err != nil && !isNotFound(err) { + if err = g.RemoveInstancesFromInstanceGroup(name, zone, instanceRefs); err != nil && !isNotFound(err) { return "", err } } if len(addNodes) != 0 { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): adding nodes: %v", name, zone, addNodes) - instanceRefs := gce.ToInstanceReferences(zone, addNodes) - if err = gce.AddInstancesToInstanceGroup(name, zone, instanceRefs); err != nil { + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): adding nodes: %v", name, zone, addNodes) + instanceRefs := g.ToInstanceReferences(zone, addNodes) + if err = g.AddInstancesToInstanceGroup(name, zone, instanceRefs); err != nil { return "", err } } @@ -482,12 +482,12 @@ func (gce *GCECloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1. // ensureInternalInstanceGroups generates an unmanaged instance group for every zone // where a K8s node exists. It also ensures that each node belongs to an instance group -func (gce *GCECloud) ensureInternalInstanceGroups(name string, nodes []*v1.Node) ([]string, error) { +func (g *Cloud) ensureInternalInstanceGroups(name string, nodes []*v1.Node) ([]string, error) { zonedNodes := splitNodesByZone(nodes) - glog.V(2).Infof("ensureInternalInstanceGroups(%v): %d nodes over %d zones in region %v", name, len(nodes), len(zonedNodes), gce.region) + klog.V(2).Infof("ensureInternalInstanceGroups(%v): %d nodes over %d zones in region %v", name, len(nodes), len(zonedNodes), g.region) var igLinks []string for zone, nodes := range zonedNodes { - igLink, err := gce.ensureInternalInstanceGroup(name, zone, nodes) + igLink, err := g.ensureInternalInstanceGroup(name, zone, nodes) if err != nil { return []string{}, err } @@ -497,25 +497,25 @@ func (gce *GCECloud) ensureInternalInstanceGroups(name string, nodes []*v1.Node) return igLinks, nil } -func (gce *GCECloud) ensureInternalInstanceGroupsDeleted(name string) error { +func (g *Cloud) ensureInternalInstanceGroupsDeleted(name string) error { // List of nodes isn't available here - fetch all zones in region and try deleting this cluster's ig - zones, err := gce.ListZonesInRegion(gce.region) + zones, err := g.ListZonesInRegion(g.region) if err != nil { return err } - glog.V(2).Infof("ensureInternalInstanceGroupsDeleted(%v): attempting delete instance group in all %d zones", name, len(zones)) + klog.V(2).Infof("ensureInternalInstanceGroupsDeleted(%v): attempting delete instance group in all %d zones", name, len(zones)) for _, z := range zones { - if err := gce.DeleteInstanceGroup(name, z.Name); err != nil && !isNotFoundOrInUse(err) { + if err := g.DeleteInstanceGroup(name, z.Name); err != nil && !isNotFoundOrInUse(err) { return err } } return nil } -func (gce *GCECloud) ensureInternalBackendService(name, description string, affinityType v1.ServiceAffinity, scheme cloud.LbScheme, protocol v1.Protocol, igLinks []string, hcLink string) error { - glog.V(2).Infof("ensureInternalBackendService(%v, %v, %v): checking existing backend service with %d groups", name, scheme, protocol, len(igLinks)) - bs, err := gce.GetRegionBackendService(name, gce.region) +func (g *Cloud) ensureInternalBackendService(name, description string, affinityType v1.ServiceAffinity, scheme cloud.LbScheme, protocol v1.Protocol, igLinks []string, hcLink string) error { + klog.V(2).Infof("ensureInternalBackendService(%v, %v, %v): checking existing backend service with %d groups", name, scheme, protocol, len(igLinks)) + bs, err := 
g.GetRegionBackendService(name, g.region) if err != nil && !isNotFound(err) { return err } @@ -533,12 +533,12 @@ func (gce *GCECloud) ensureInternalBackendService(name, description string, affi // Create backend service if none was found if bs == nil { - glog.V(2).Infof("ensureInternalBackendService: creating backend service %v", name) - err := gce.CreateRegionBackendService(expectedBS, gce.region) + klog.V(2).Infof("ensureInternalBackendService: creating backend service %v", name) + err := g.CreateRegionBackendService(expectedBS, g.region) if err != nil { return err } - glog.V(2).Infof("ensureInternalBackendService: created backend service %v successfully", name) + klog.V(2).Infof("ensureInternalBackendService: created backend service %v successfully", name) return nil } @@ -546,20 +546,20 @@ func (gce *GCECloud) ensureInternalBackendService(name, description string, affi return nil } - glog.V(2).Infof("ensureInternalBackendService: updating backend service %v", name) + klog.V(2).Infof("ensureInternalBackendService: updating backend service %v", name) // Set fingerprint for optimistic locking expectedBS.Fingerprint = bs.Fingerprint - if err := gce.UpdateRegionBackendService(expectedBS, gce.region); err != nil { + if err := g.UpdateRegionBackendService(expectedBS, g.region); err != nil { return err } - glog.V(2).Infof("ensureInternalBackendService: updated backend service %v successfully", name) + klog.V(2).Infof("ensureInternalBackendService: updated backend service %v successfully", name) return nil } // ensureInternalBackendServiceGroups updates backend services if their list of backend instance groups is incorrect. -func (gce *GCECloud) ensureInternalBackendServiceGroups(name string, igLinks []string) error { - glog.V(2).Infof("ensureInternalBackendServiceGroups(%v): checking existing backend service's groups", name) - bs, err := gce.GetRegionBackendService(name, gce.region) +func (g *Cloud) ensureInternalBackendServiceGroups(name string, igLinks []string) error { + klog.V(2).Infof("ensureInternalBackendServiceGroups(%v): checking existing backend service's groups", name) + bs, err := g.GetRegionBackendService(name, g.region) if err != nil { return err } @@ -572,11 +572,11 @@ func (gce *GCECloud) ensureInternalBackendServiceGroups(name string, igLinks []s // Set the backend service's backends to the updated list. 
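Note on optimistic locking: ensureInternalBackendService above updates an existing backend service only after copying the live object's Fingerprint onto the desired spec, so the GCE API can reject the write if the resource changed in between. A minimal sketch of that step; the values are hypothetical and the surrounding update call is omitted.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// prepareUpdate copies the fingerprint of the live backend service onto the
// desired spec before an Update call, mirroring the step in the hunk above.
func prepareUpdate(existing, desired *compute.BackendService) *compute.BackendService {
	desired.Fingerprint = existing.Fingerprint
	return desired
}

func main() {
	existing := &compute.BackendService{Name: "example-bs", Fingerprint: "abc123"} // hypothetical values
	desired := &compute.BackendService{Name: "example-bs", SessionAffinity: "NONE"}
	fmt.Println(prepareUpdate(existing, desired).Fingerprint) // abc123
}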
bs.Backends = backends - glog.V(2).Infof("ensureInternalBackendServiceGroups: updating backend service %v", name) - if err := gce.UpdateRegionBackendService(bs, gce.region); err != nil { + klog.V(2).Infof("ensureInternalBackendServiceGroups: updating backend service %v", name) + if err := g.UpdateRegionBackendService(bs, g.region); err != nil { return err } - glog.V(2).Infof("ensureInternalBackendServiceGroups: updated backend service %v successfully", name) + klog.V(2).Infof("ensureInternalBackendServiceGroups: updated backend service %v successfully", name) return nil } @@ -707,8 +707,8 @@ func getPortsAndProtocol(svcPorts []v1.ServicePort) (ports []string, protocol v1 return ports, protocol } -func (gce *GCECloud) getBackendServiceLink(name string) string { - return gce.service.BasePath + strings.Join([]string{gce.projectID, "regions", gce.region, "backendServices", name}, "/") +func (g *Cloud) getBackendServiceLink(name string) string { + return g.service.BasePath + strings.Join([]string{g.projectID, "regions", g.region, "backendServices", name}, "/") } func getNameFromLink(link string) string { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go index f82781e08d9ba..b7aa998cc34e8 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go @@ -27,40 +27,38 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" ) -const ( - NEGIPPortNetworkEndpointType = "GCE_VM_IP_PORT" -) - func newNetworkEndpointGroupMetricContext(request string, zone string) *metricContext { return newGenericMetricContext("networkendpointgroup_", request, unusedMetricLabel, zone, computeBetaVersion) } -func (gce *GCECloud) GetNetworkEndpointGroup(name string, zone string) (*computebeta.NetworkEndpointGroup, error) { +// GetNetworkEndpointGroup returns the collection of network endpoints for the name in zone +func (g *Cloud) GetNetworkEndpointGroup(name string, zone string) (*computebeta.NetworkEndpointGroup, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newNetworkEndpointGroupMetricContext("get", zone) - v, err := gce.c.BetaNetworkEndpointGroups().Get(ctx, meta.ZonalKey(name, zone)) + v, err := g.c.BetaNetworkEndpointGroups().Get(ctx, meta.ZonalKey(name, zone)) return v, mc.Observe(err) } -func (gce *GCECloud) ListNetworkEndpointGroup(zone string) ([]*computebeta.NetworkEndpointGroup, error) { +// ListNetworkEndpointGroup returns the collection of network endpoints for the zone +func (g *Cloud) ListNetworkEndpointGroup(zone string) ([]*computebeta.NetworkEndpointGroup, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newNetworkEndpointGroupMetricContext("list", zone) - negs, err := gce.c.BetaNetworkEndpointGroups().List(ctx, zone, filter.None) + negs, err := g.c.BetaNetworkEndpointGroups().List(ctx, zone, filter.None) return negs, mc.Observe(err) } // AggregatedListNetworkEndpointGroup returns a map of zone -> endpoint group. 
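Note on self-links: getBackendServiceLink above composes a regional backend-service self-link from the API base path, project ID, and region. The same join written as a standalone helper; the base path value is an assumption (it is taken to already end with the "projects/" segment, as the provider's stored BasePath does).

package main

import (
	"fmt"
	"strings"
)

// backendServiceLink mirrors getBackendServiceLink: the base path is assumed to end
// with a trailing slash, so the remaining segments are simply joined with "/".
func backendServiceLink(basePath, projectID, region, name string) string {
	return basePath + strings.Join([]string{projectID, "regions", region, "backendServices", name}, "/")
}

func main() {
	// Hypothetical values.
	fmt.Println(backendServiceLink(
		"https://www.googleapis.com/compute/v1/projects/",
		"my-project", "us-central1", "example-bs",
	))
}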
-func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*computebeta.NetworkEndpointGroup, error) { +func (g *Cloud) AggregatedListNetworkEndpointGroup() (map[string][]*computebeta.NetworkEndpointGroup, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newNetworkEndpointGroupMetricContext("aggregated_list", "") // TODO: filter for the region the cluster is in. - all, err := gce.c.BetaNetworkEndpointGroups().AggregatedList(ctx, filter.None) + all, err := g.c.BetaNetworkEndpointGroups().AggregatedList(ctx, filter.None) if err != nil { return nil, mc.Observe(err) } @@ -77,23 +75,26 @@ func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*compute return ret, mc.Observe(nil) } -func (gce *GCECloud) CreateNetworkEndpointGroup(neg *computebeta.NetworkEndpointGroup, zone string) error { +// CreateNetworkEndpointGroup creates an endpoint group in the zone +func (g *Cloud) CreateNetworkEndpointGroup(neg *computebeta.NetworkEndpointGroup, zone string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newNetworkEndpointGroupMetricContext("create", zone) - return mc.Observe(gce.c.BetaNetworkEndpointGroups().Insert(ctx, meta.ZonalKey(neg.Name, zone), neg)) + return mc.Observe(g.c.BetaNetworkEndpointGroups().Insert(ctx, meta.ZonalKey(neg.Name, zone), neg)) } -func (gce *GCECloud) DeleteNetworkEndpointGroup(name string, zone string) error { +// DeleteNetworkEndpointGroup deletes the name endpoint group from the zone +func (g *Cloud) DeleteNetworkEndpointGroup(name string, zone string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newNetworkEndpointGroupMetricContext("delete", zone) - return mc.Observe(gce.c.BetaNetworkEndpointGroups().Delete(ctx, meta.ZonalKey(name, zone))) + return mc.Observe(g.c.BetaNetworkEndpointGroups().Delete(ctx, meta.ZonalKey(name, zone))) } -func (gce *GCECloud) AttachNetworkEndpoints(name, zone string, endpoints []*computebeta.NetworkEndpoint) error { +// AttachNetworkEndpoints associates the referenced endpoints with the named endpoint group in the zone +func (g *Cloud) AttachNetworkEndpoints(name, zone string, endpoints []*computebeta.NetworkEndpoint) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -101,10 +102,11 @@ func (gce *GCECloud) AttachNetworkEndpoints(name, zone string, endpoints []*comp req := &computebeta.NetworkEndpointGroupsAttachEndpointsRequest{ NetworkEndpoints: endpoints, } - return mc.Observe(gce.c.BetaNetworkEndpointGroups().AttachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req)) + return mc.Observe(g.c.BetaNetworkEndpointGroups().AttachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req)) } -func (gce *GCECloud) DetachNetworkEndpoints(name, zone string, endpoints []*computebeta.NetworkEndpoint) error { +// DetachNetworkEndpoints breaks the association between the referenced endpoints and the named endpoint group in the zone +func (g *Cloud) DetachNetworkEndpoints(name, zone string, endpoints []*computebeta.NetworkEndpoint) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -112,10 +114,11 @@ func (gce *GCECloud) DetachNetworkEndpoints(name, zone string, endpoints []*comp req := &computebeta.NetworkEndpointGroupsDetachEndpointsRequest{ NetworkEndpoints: endpoints, } - return mc.Observe(gce.c.BetaNetworkEndpointGroups().DetachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req)) + return mc.Observe(g.c.BetaNetworkEndpointGroups().DetachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req)) } 
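Note on the wrapper pattern: every method in this file (and in the security-policy and target-pool files below) follows the same shape: derive a context with a call timeout, build a per-request metric context, issue the API call, and record the outcome through mc.Observe. The provider's ContextWithCallTimeout and metricContext internals are not part of this diff, so the sketch below substitutes context.WithTimeout and a hypothetical observe helper just to show the pattern.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// observe is a hypothetical stand-in for metricContext.Observe: it records the
// outcome of one API call and passes the error straight through.
func observe(request string, start time.Time, err error) error {
	fmt.Printf("request=%s duration=%s err=%v\n", request, time.Since(start), err)
	return err
}

// deleteNEG shows the wrapper shape used above: a timeout-scoped context plus
// per-request observation. The inner API call is faked here.
func deleteNEG(ctx context.Context, name, zone string) error {
	ctx, cancel := context.WithTimeout(ctx, time.Hour) // assumed timeout; the real value lives in ContextWithCallTimeout
	defer cancel()

	start := time.Now()
	err := fakeAPIDelete(ctx, name, zone)
	return observe("delete", start, err)
}

func fakeAPIDelete(ctx context.Context, name, zone string) error {
	if name == "" {
		return errors.New("missing name")
	}
	return nil
}

func main() {
	_ = deleteNEG(context.Background(), "example-neg", "us-central1-a")
}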
-func (gce *GCECloud) ListNetworkEndpoints(name, zone string, showHealthStatus bool) ([]*computebeta.NetworkEndpointWithHealthStatus, error) { +// ListNetworkEndpoints returns all the endpoints associated with the endpoint group in zone and optionally their status. +func (g *Cloud) ListNetworkEndpoints(name, zone string, showHealthStatus bool) ([]*computebeta.NetworkEndpointWithHealthStatus, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -127,6 +130,6 @@ func (gce *GCECloud) ListNetworkEndpoints(name, zone string, showHealthStatus bo req := &computebeta.NetworkEndpointGroupsListEndpointsRequest{ HealthStatus: healthStatus, } - l, err := gce.c.BetaNetworkEndpointGroups().ListNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req, filter.None) + l, err := g.c.BetaNetworkEndpointGroups().ListNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req, filter.None) return l, mc.Observe(err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go index 624b581cbd94a..cc3cbfc53a045 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go @@ -22,11 +22,11 @@ import ( "net/http" "path" - "github.com/golang/glog" compute "google.golang.org/api/compute/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" @@ -37,14 +37,14 @@ func newRoutesMetricContext(request string) *metricContext { } // ListRoutes in the cloud environment. -func (gce *GCECloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { +func (g *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newRoutesMetricContext("list") prefix := truncateClusterName(clusterName) - f := filter.Regexp("name", prefix+"-.*").AndRegexp("network", gce.NetworkURL()).AndRegexp("description", k8sNodeRouteTag) - routes, err := gce.c.Routes().List(ctx, f) + f := filter.Regexp("name", prefix+"-.*").AndRegexp("network", g.NetworkURL()).AndRegexp("description", k8sNodeRouteTag) + routes, err := g.c.Routes().List(ctx, f) if err != nil { return nil, mc.Observe(err) } @@ -63,13 +63,13 @@ func (gce *GCECloud) ListRoutes(ctx context.Context, clusterName string) ([]*clo } // CreateRoute in the cloud environment. 
-func (gce *GCECloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error { +func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newRoutesMetricContext("create") - targetInstance, err := gce.getInstanceByName(mapNodeNameToInstanceName(route.TargetNode)) + targetInstance, err := g.getInstanceByName(mapNodeNameToInstanceName(route.TargetNode)) if err != nil { return mc.Observe(err) } @@ -77,25 +77,25 @@ func (gce *GCECloud) CreateRoute(ctx context.Context, clusterName string, nameHi Name: truncateClusterName(clusterName) + "-" + nameHint, DestRange: route.DestinationCIDR, NextHopInstance: fmt.Sprintf("zones/%s/instances/%s", targetInstance.Zone, targetInstance.Name), - Network: gce.NetworkURL(), + Network: g.NetworkURL(), Priority: 1000, Description: k8sNodeRouteTag, } - err = gce.c.Routes().Insert(ctx, meta.GlobalKey(cr.Name), cr) + err = g.c.Routes().Insert(ctx, meta.GlobalKey(cr.Name), cr) if isHTTPErrorCode(err, http.StatusConflict) { - glog.Infof("Route %q already exists.", cr.Name) + klog.Infof("Route %q already exists.", cr.Name) err = nil } return mc.Observe(err) } // DeleteRoute from the cloud environment. -func (gce *GCECloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error { +func (g *Cloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newRoutesMetricContext("delete") - return mc.Observe(gce.c.Routes().Delete(ctx, meta.GlobalKey(route.Name))) + return mc.Observe(g.c.Routes().Delete(ctx, meta.GlobalKey(route.Name))) } func truncateClusterName(clusterName string) string { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_securitypolicy.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_securitypolicy.go index 293946590b3d7..3ce2f71fa5406 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_securitypolicy.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_securitypolicy.go @@ -29,88 +29,88 @@ func newSecurityPolicyMetricContextWithVersion(request, version string) *metricC } // GetBetaSecurityPolicy retrieves a security policy. -func (gce *GCECloud) GetBetaSecurityPolicy(name string) (*computebeta.SecurityPolicy, error) { +func (g *Cloud) GetBetaSecurityPolicy(name string) (*computebeta.SecurityPolicy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("get", computeBetaVersion) - v, err := gce.c.BetaSecurityPolicies().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.BetaSecurityPolicies().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // ListBetaSecurityPolicy lists all security policies in the project. -func (gce *GCECloud) ListBetaSecurityPolicy() ([]*computebeta.SecurityPolicy, error) { +func (g *Cloud) ListBetaSecurityPolicy() ([]*computebeta.SecurityPolicy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("list", computeBetaVersion) - v, err := gce.c.BetaSecurityPolicies().List(ctx, filter.None) + v, err := g.c.BetaSecurityPolicies().List(ctx, filter.None) return v, mc.Observe(err) } // CreateBetaSecurityPolicy creates the given security policy. 
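Note on the cloud-provider move: the gce_routes.go hunk above swaps the import from k8s.io/kubernetes/pkg/cloudprovider to the extracted k8s.io/cloud-provider module while the Route type itself is unchanged. A sketch of a consumer building a route against the new import path; the values are made up and only the fields referenced in these hunks (Name, TargetNode, DestinationCIDR) are used.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	cloudprovider "k8s.io/cloud-provider"
)

func main() {
	// Hypothetical route; CreateRoute above derives the name from the cluster name plus a hint.
	route := &cloudprovider.Route{
		Name:            "my-cluster-1a2b3c",
		TargetNode:      types.NodeName("node-a"),
		DestinationCIDR: "10.244.1.0/24",
	}
	fmt.Printf("route %s -> node %s (%s)\n", route.Name, route.TargetNode, route.DestinationCIDR)
}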
-func (gce *GCECloud) CreateBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error { +func (g *Cloud) CreateBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("create", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().Insert(ctx, meta.GlobalKey(sp.Name), sp)) + return mc.Observe(g.c.BetaSecurityPolicies().Insert(ctx, meta.GlobalKey(sp.Name), sp)) } // DeleteBetaSecurityPolicy deletes the given security policy. -func (gce *GCECloud) DeleteBetaSecurityPolicy(name string) error { +func (g *Cloud) DeleteBetaSecurityPolicy(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("delete", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.BetaSecurityPolicies().Delete(ctx, meta.GlobalKey(name))) } // PatchBetaSecurityPolicy applies the given security policy as a // patch to an existing security policy. -func (gce *GCECloud) PatchBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error { +func (g *Cloud) PatchBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("patch", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().Patch(ctx, meta.GlobalKey(sp.Name), sp)) + return mc.Observe(g.c.BetaSecurityPolicies().Patch(ctx, meta.GlobalKey(sp.Name), sp)) } // GetRuleForBetaSecurityPolicy gets rule from a security policy. -func (gce *GCECloud) GetRuleForBetaSecurityPolicy(name string) (*computebeta.SecurityPolicyRule, error) { +func (g *Cloud) GetRuleForBetaSecurityPolicy(name string) (*computebeta.SecurityPolicyRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("get_rule", computeBetaVersion) - v, err := gce.c.BetaSecurityPolicies().GetRule(ctx, meta.GlobalKey(name)) + v, err := g.c.BetaSecurityPolicies().GetRule(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // AddRuletoBetaSecurityPolicy adds the given security policy rule to // a security policy. -func (gce *GCECloud) AddRuletoBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error { +func (g *Cloud) AddRuletoBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("add_rule", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().AddRule(ctx, meta.GlobalKey(name), spr)) + return mc.Observe(g.c.BetaSecurityPolicies().AddRule(ctx, meta.GlobalKey(name), spr)) } // PatchRuleForBetaSecurityPolicy patches the given security policy // rule to a security policy. -func (gce *GCECloud) PatchRuleForBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error { +func (g *Cloud) PatchRuleForBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("patch_rule", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().PatchRule(ctx, meta.GlobalKey(name), spr)) + return mc.Observe(g.c.BetaSecurityPolicies().PatchRule(ctx, meta.GlobalKey(name), spr)) } // RemoveRuleFromBetaSecurityPolicy removes rule from a security policy. 
-func (gce *GCECloud) RemoveRuleFromBetaSecurityPolicy(name string) error { +func (g *Cloud) RemoveRuleFromBetaSecurityPolicy(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("remove_rule", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().RemoveRule(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.BetaSecurityPolicies().RemoveRule(ctx, meta.GlobalKey(name))) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetpool.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetpool.go index 8c1127e749e65..65388b44980a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetpool.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetpool.go @@ -28,35 +28,35 @@ func newTargetPoolMetricContext(request, region string) *metricContext { } // GetTargetPool returns the TargetPool by name. -func (gce *GCECloud) GetTargetPool(name, region string) (*compute.TargetPool, error) { +func (g *Cloud) GetTargetPool(name, region string) (*compute.TargetPool, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetPoolMetricContext("get", region) - v, err := gce.c.TargetPools().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.TargetPools().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // CreateTargetPool creates the passed TargetPool -func (gce *GCECloud) CreateTargetPool(tp *compute.TargetPool, region string) error { +func (g *Cloud) CreateTargetPool(tp *compute.TargetPool, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetPoolMetricContext("create", region) - return mc.Observe(gce.c.TargetPools().Insert(ctx, meta.RegionalKey(tp.Name, region), tp)) + return mc.Observe(g.c.TargetPools().Insert(ctx, meta.RegionalKey(tp.Name, region), tp)) } // DeleteTargetPool deletes TargetPool by name. 
-func (gce *GCECloud) DeleteTargetPool(name, region string) error { +func (g *Cloud) DeleteTargetPool(name, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetPoolMetricContext("delete", region) - return mc.Observe(gce.c.TargetPools().Delete(ctx, meta.RegionalKey(name, region))) + return mc.Observe(g.c.TargetPools().Delete(ctx, meta.RegionalKey(name, region))) } // AddInstancesToTargetPool adds instances by link to the TargetPool -func (gce *GCECloud) AddInstancesToTargetPool(name, region string, instanceRefs []*compute.InstanceReference) error { +func (g *Cloud) AddInstancesToTargetPool(name, region string, instanceRefs []*compute.InstanceReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -64,11 +64,11 @@ func (gce *GCECloud) AddInstancesToTargetPool(name, region string, instanceRefs Instances: instanceRefs, } mc := newTargetPoolMetricContext("add_instances", region) - return mc.Observe(gce.c.TargetPools().AddInstance(ctx, meta.RegionalKey(name, region), req)) + return mc.Observe(g.c.TargetPools().AddInstance(ctx, meta.RegionalKey(name, region), req)) } // RemoveInstancesFromTargetPool removes instances by link to the TargetPool -func (gce *GCECloud) RemoveInstancesFromTargetPool(name, region string, instanceRefs []*compute.InstanceReference) error { +func (g *Cloud) RemoveInstancesFromTargetPool(name, region string, instanceRefs []*compute.InstanceReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -76,5 +76,5 @@ func (gce *GCECloud) RemoveInstancesFromTargetPool(name, region string, instance Instances: instanceRefs, } mc := newTargetPoolMetricContext("remove_instances", region) - return mc.Observe(gce.c.TargetPools().RemoveInstance(ctx, meta.RegionalKey(name, region), req)) + return mc.Observe(g.c.TargetPools().RemoveInstance(ctx, meta.RegionalKey(name, region), req)) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetproxy.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetproxy.go index c5bd21aaedf5d..64a4190628eae 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetproxy.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetproxy.go @@ -28,87 +28,87 @@ func newTargetProxyMetricContext(request string) *metricContext { return newGenericMetricContext("targetproxy", request, unusedMetricLabel, unusedMetricLabel, computeV1Version) } -// GetTargetHttpProxy returns the UrlMap by name. -func (gce *GCECloud) GetTargetHttpProxy(name string) (*compute.TargetHttpProxy, error) { +// GetTargetHTTPProxy returns the UrlMap by name. 
+func (g *Cloud) GetTargetHTTPProxy(name string) (*compute.TargetHttpProxy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("get") - v, err := gce.c.TargetHttpProxies().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.TargetHttpProxies().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } -// CreateTargetHttpProxy creates a TargetHttpProxy -func (gce *GCECloud) CreateTargetHttpProxy(proxy *compute.TargetHttpProxy) error { +// CreateTargetHTTPProxy creates a TargetHttpProxy +func (g *Cloud) CreateTargetHTTPProxy(proxy *compute.TargetHttpProxy) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("create") - return mc.Observe(gce.c.TargetHttpProxies().Insert(ctx, meta.GlobalKey(proxy.Name), proxy)) + return mc.Observe(g.c.TargetHttpProxies().Insert(ctx, meta.GlobalKey(proxy.Name), proxy)) } -// SetUrlMapForTargetHttpProxy sets the given UrlMap for the given TargetHttpProxy. -func (gce *GCECloud) SetUrlMapForTargetHttpProxy(proxy *compute.TargetHttpProxy, urlMapLink string) error { +// SetURLMapForTargetHTTPProxy sets the given UrlMap for the given TargetHttpProxy. +func (g *Cloud) SetURLMapForTargetHTTPProxy(proxy *compute.TargetHttpProxy, urlMapLink string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() ref := &compute.UrlMapReference{UrlMap: urlMapLink} mc := newTargetProxyMetricContext("set_url_map") - return mc.Observe(gce.c.TargetHttpProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref)) + return mc.Observe(g.c.TargetHttpProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref)) } -// DeleteTargetHttpProxy deletes the TargetHttpProxy by name. -func (gce *GCECloud) DeleteTargetHttpProxy(name string) error { +// DeleteTargetHTTPProxy deletes the TargetHttpProxy by name. +func (g *Cloud) DeleteTargetHTTPProxy(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("delete") - return mc.Observe(gce.c.TargetHttpProxies().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.TargetHttpProxies().Delete(ctx, meta.GlobalKey(name))) } -// ListTargetHttpProxies lists all TargetHttpProxies in the project. -func (gce *GCECloud) ListTargetHttpProxies() ([]*compute.TargetHttpProxy, error) { +// ListTargetHTTPProxies lists all TargetHttpProxies in the project. +func (g *Cloud) ListTargetHTTPProxies() ([]*compute.TargetHttpProxy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("list") - v, err := gce.c.TargetHttpProxies().List(ctx, filter.None) + v, err := g.c.TargetHttpProxies().List(ctx, filter.None) return v, mc.Observe(err) } // TargetHttpsProxy management -// GetTargetHttpsProxy returns the UrlMap by name. -func (gce *GCECloud) GetTargetHttpsProxy(name string) (*compute.TargetHttpsProxy, error) { +// GetTargetHTTPSProxy returns the UrlMap by name. 
+func (g *Cloud) GetTargetHTTPSProxy(name string) (*compute.TargetHttpsProxy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("get") - v, err := gce.c.TargetHttpsProxies().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.TargetHttpsProxies().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } -// CreateTargetHttpsProxy creates a TargetHttpsProxy -func (gce *GCECloud) CreateTargetHttpsProxy(proxy *compute.TargetHttpsProxy) error { +// CreateTargetHTTPSProxy creates a TargetHttpsProxy +func (g *Cloud) CreateTargetHTTPSProxy(proxy *compute.TargetHttpsProxy) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("create") - return mc.Observe(gce.c.TargetHttpsProxies().Insert(ctx, meta.GlobalKey(proxy.Name), proxy)) + return mc.Observe(g.c.TargetHttpsProxies().Insert(ctx, meta.GlobalKey(proxy.Name), proxy)) } -// SetUrlMapForTargetHttpsProxy sets the given UrlMap for the given TargetHttpsProxy. -func (gce *GCECloud) SetUrlMapForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, urlMapLink string) error { +// SetURLMapForTargetHTTPSProxy sets the given UrlMap for the given TargetHttpsProxy. +func (g *Cloud) SetURLMapForTargetHTTPSProxy(proxy *compute.TargetHttpsProxy, urlMapLink string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("set_url_map") ref := &compute.UrlMapReference{UrlMap: urlMapLink} - return mc.Observe(gce.c.TargetHttpsProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref)) + return mc.Observe(g.c.TargetHttpsProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref)) } -// SetSslCertificateForTargetHttpsProxy sets the given SslCertificate for the given TargetHttpsProxy. -func (gce *GCECloud) SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, sslCertURLs []string) error { +// SetSslCertificateForTargetHTTPSProxy sets the given SslCertificate for the given TargetHttpsProxy. +func (g *Cloud) SetSslCertificateForTargetHTTPSProxy(proxy *compute.TargetHttpsProxy, sslCertURLs []string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -116,24 +116,24 @@ func (gce *GCECloud) SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetH req := &compute.TargetHttpsProxiesSetSslCertificatesRequest{ SslCertificates: sslCertURLs, } - return mc.Observe(gce.c.TargetHttpsProxies().SetSslCertificates(ctx, meta.GlobalKey(proxy.Name), req)) + return mc.Observe(g.c.TargetHttpsProxies().SetSslCertificates(ctx, meta.GlobalKey(proxy.Name), req)) } -// DeleteTargetHttpsProxy deletes the TargetHttpsProxy by name. -func (gce *GCECloud) DeleteTargetHttpsProxy(name string) error { +// DeleteTargetHTTPSProxy deletes the TargetHttpsProxy by name. +func (g *Cloud) DeleteTargetHTTPSProxy(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("delete") - return mc.Observe(gce.c.TargetHttpsProxies().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.TargetHttpsProxies().Delete(ctx, meta.GlobalKey(name))) } -// ListTargetHttpsProxies lists all TargetHttpsProxies in the project. -func (gce *GCECloud) ListTargetHttpsProxies() ([]*compute.TargetHttpsProxy, error) { +// ListTargetHTTPSProxies lists all TargetHttpsProxies in the project. 
+func (g *Cloud) ListTargetHTTPSProxies() ([]*compute.TargetHttpsProxy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("list") - v, err := gce.c.TargetHttpsProxies().List(ctx, filter.None) + v, err := g.c.TargetHttpsProxies().List(ctx, filter.None) return v, mc.Observe(err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_tpu.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_tpu.go index 0a78f62cb3ade..b9fae1da2fd66 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_tpu.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_tpu.go @@ -23,9 +23,9 @@ import ( "net/http" "time" - "github.com/golang/glog" "google.golang.org/api/googleapi" tpuapi "google.golang.org/api/tpu/v1" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/wait" ) @@ -50,20 +50,20 @@ type tpuService struct { // CreateTPU creates the Cloud TPU node with the specified name in the // specified zone. -func (gce *GCECloud) CreateTPU(ctx context.Context, name, zone string, node *tpuapi.Node) (*tpuapi.Node, error) { +func (g *Cloud) CreateTPU(ctx context.Context, name, zone string, node *tpuapi.Node) (*tpuapi.Node, error) { var err error mc := newTPUMetricContext("create", zone) defer mc.Observe(err) var op *tpuapi.Operation - parent := getTPUParentName(gce.projectID, zone) - op, err = gce.tpuService.projects.Locations.Nodes.Create(parent, node).NodeId(name).Do() + parent := getTPUParentName(g.projectID, zone) + op, err = g.tpuService.projects.Locations.Nodes.Create(parent, node).NodeId(name).Do() if err != nil { return nil, err } - glog.V(2).Infof("Creating Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) + klog.V(2).Infof("Creating Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) - op, err = gce.waitForTPUOp(ctx, op) + op, err = g.waitForTPUOp(ctx, op) if err != nil { return nil, err } @@ -83,20 +83,20 @@ func (gce *GCECloud) CreateTPU(ctx context.Context, name, zone string, node *tpu // DeleteTPU deletes the Cloud TPU with the specified name in the specified // zone. -func (gce *GCECloud) DeleteTPU(ctx context.Context, name, zone string) error { +func (g *Cloud) DeleteTPU(ctx context.Context, name, zone string) error { var err error mc := newTPUMetricContext("delete", zone) defer mc.Observe(err) var op *tpuapi.Operation - name = getTPUName(gce.projectID, zone, name) - op, err = gce.tpuService.projects.Locations.Nodes.Delete(name).Do() + name = getTPUName(g.projectID, zone, name) + op, err = g.tpuService.projects.Locations.Nodes.Delete(name).Do() if err != nil { return err } - glog.V(2).Infof("Deleting Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) + klog.V(2).Infof("Deleting Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) - op, err = gce.waitForTPUOp(ctx, op) + op, err = g.waitForTPUOp(ctx, op) if err != nil { return err } @@ -108,11 +108,11 @@ func (gce *GCECloud) DeleteTPU(ctx context.Context, name, zone string) error { } // GetTPU returns the Cloud TPU with the specified name in the specified zone. 
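Note on the pattern above: every renamed Cloud method keeps the same three-step shape — build a metricContext for the request, call through g.c (the generated GCE client interface), and hand the result to mc.Observe so latency and errors are recorded exactly once and the error is passed back to the caller. A minimal, self-contained sketch of that shape; recordCall and fakeClient are hypothetical stand-ins for the real metricContext and client, not part of the provider:

// Sketch only: illustrates the observe-and-return pattern used by the Cloud
// methods above; recordCall and fakeClient are hypothetical stand-ins.
package main

import (
	"errors"
	"fmt"
	"time"
)

// recordCall mimics metricContext.Observe: it records duration and outcome,
// then passes the error straight through so callers can return it directly.
func recordCall(op string, start time.Time, err error) error {
	fmt.Printf("op=%s duration=%s err=%v\n", op, time.Since(start), err)
	return err
}

// fakeClient stands in for a g.c.<Resource>() call.
func fakeClient(name string) error {
	if name == "" {
		return errors.New("name required")
	}
	return nil
}

// deleteThing mirrors e.g. DeleteTargetPool: one API call, one Observe.
func deleteThing(name string) error {
	start := time.Now()
	return recordCall("delete", start, fakeClient(name))
}

func main() {
	fmt.Println(deleteThing("my-pool")) // <nil>
	fmt.Println(deleteThing(""))        // name required
}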
-func (gce *GCECloud) GetTPU(ctx context.Context, name, zone string) (*tpuapi.Node, error) { +func (g *Cloud) GetTPU(ctx context.Context, name, zone string) (*tpuapi.Node, error) { mc := newTPUMetricContext("get", zone) - name = getTPUName(gce.projectID, zone, name) - node, err := gce.tpuService.projects.Locations.Nodes.Get(name).Do() + name = getTPUName(g.projectID, zone, name) + node, err := g.tpuService.projects.Locations.Nodes.Get(name).Do() if err != nil { return nil, mc.Observe(err) } @@ -120,11 +120,11 @@ func (gce *GCECloud) GetTPU(ctx context.Context, name, zone string) (*tpuapi.Nod } // ListTPUs returns Cloud TPUs in the specified zone. -func (gce *GCECloud) ListTPUs(ctx context.Context, zone string) ([]*tpuapi.Node, error) { +func (g *Cloud) ListTPUs(ctx context.Context, zone string) ([]*tpuapi.Node, error) { mc := newTPUMetricContext("list", zone) - parent := getTPUParentName(gce.projectID, zone) - response, err := gce.tpuService.projects.Locations.Nodes.List(parent).Do() + parent := getTPUParentName(g.projectID, zone) + response, err := g.tpuService.projects.Locations.Nodes.List(parent).Do() if err != nil { return nil, mc.Observe(err) } @@ -132,10 +132,10 @@ func (gce *GCECloud) ListTPUs(ctx context.Context, zone string) ([]*tpuapi.Node, } // ListLocations returns the zones where Cloud TPUs are available. -func (gce *GCECloud) ListLocations(ctx context.Context) ([]*tpuapi.Location, error) { +func (g *Cloud) ListLocations(ctx context.Context) ([]*tpuapi.Location, error) { mc := newTPUMetricContext("list_locations", "") - parent := getTPUProjectURL(gce.projectID) - response, err := gce.tpuService.projects.Locations.List(parent).Do() + parent := getTPUProjectURL(g.projectID) + response, err := g.tpuService.projects.Locations.List(parent).Do() if err != nil { return nil, mc.Observe(err) } @@ -144,32 +144,32 @@ func (gce *GCECloud) ListLocations(ctx context.Context) ([]*tpuapi.Location, err // waitForTPUOp checks whether the op is done every 30 seconds before the ctx // is cancelled. -func (gce *GCECloud) waitForTPUOp(ctx context.Context, op *tpuapi.Operation) (*tpuapi.Operation, error) { +func (g *Cloud) waitForTPUOp(ctx context.Context, op *tpuapi.Operation) (*tpuapi.Operation, error) { if err := wait.PollInfinite(30*time.Second, func() (bool, error) { // Check if context has been cancelled. 
select { case <-ctx.Done(): - glog.V(3).Infof("Context for operation %q has been cancelled: %s", op.Name, ctx.Err()) + klog.V(3).Infof("Context for operation %q has been cancelled: %s", op.Name, ctx.Err()) return true, ctx.Err() default: } - glog.V(3).Infof("Waiting for operation %q to complete...", op.Name) + klog.V(3).Infof("Waiting for operation %q to complete...", op.Name) start := time.Now() - gce.operationPollRateLimiter.Accept() + g.operationPollRateLimiter.Accept() duration := time.Now().Sub(start) if duration > 5*time.Second { - glog.V(2).Infof("Getting operation %q throttled for %v", op.Name, duration) + klog.V(2).Infof("Getting operation %q throttled for %v", op.Name, duration) } var err error - op, err = gce.tpuService.projects.Locations.Operations.Get(op.Name).Do() + op, err = g.tpuService.projects.Locations.Operations.Get(op.Name).Do() if err != nil { return true, err } if op.Done { - glog.V(3).Infof("Operation %q has completed", op.Name) + klog.V(3).Infof("Operation %q has completed", op.Name) return true, nil } return false, nil diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_urlmap.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_urlmap.go index b0e60093c15b7..7ee8b8213271a 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_urlmap.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_urlmap.go @@ -24,53 +24,53 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" ) -func newUrlMapMetricContext(request string) *metricContext { +func newURLMapMetricContext(request string) *metricContext { return newGenericMetricContext("urlmap", request, unusedMetricLabel, unusedMetricLabel, computeV1Version) } -// GetUrlMap returns the UrlMap by name. -func (gce *GCECloud) GetUrlMap(name string) (*compute.UrlMap, error) { +// GetURLMap returns the UrlMap by name. +func (g *Cloud) GetURLMap(name string) (*compute.UrlMap, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - mc := newUrlMapMetricContext("get") - v, err := gce.c.UrlMaps().Get(ctx, meta.GlobalKey(name)) + mc := newURLMapMetricContext("get") + v, err := g.c.UrlMaps().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } -// CreateUrlMap creates a url map -func (gce *GCECloud) CreateUrlMap(urlMap *compute.UrlMap) error { +// CreateURLMap creates a url map +func (g *Cloud) CreateURLMap(urlMap *compute.UrlMap) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - mc := newUrlMapMetricContext("create") - return mc.Observe(gce.c.UrlMaps().Insert(ctx, meta.GlobalKey(urlMap.Name), urlMap)) + mc := newURLMapMetricContext("create") + return mc.Observe(g.c.UrlMaps().Insert(ctx, meta.GlobalKey(urlMap.Name), urlMap)) } -// UpdateUrlMap applies the given UrlMap as an update -func (gce *GCECloud) UpdateUrlMap(urlMap *compute.UrlMap) error { +// UpdateURLMap applies the given UrlMap as an update +func (g *Cloud) UpdateURLMap(urlMap *compute.UrlMap) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - mc := newUrlMapMetricContext("update") - return mc.Observe(gce.c.UrlMaps().Update(ctx, meta.GlobalKey(urlMap.Name), urlMap)) + mc := newURLMapMetricContext("update") + return mc.Observe(g.c.UrlMaps().Update(ctx, meta.GlobalKey(urlMap.Name), urlMap)) } -// DeleteUrlMap deletes a url map by name. -func (gce *GCECloud) DeleteUrlMap(name string) error { +// DeleteURLMap deletes a url map by name. 
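Note on waitForTPUOp above: it polls the operation on a fixed interval with wait.PollInfinite and exits early when the caller's context is cancelled, because the condition function returns done=true together with ctx.Err(). A small sketch of the same polling shape, with a stub condition standing in for the TPU operations client (isDone and the one-second interval are illustrative only):

// Sketch only: same polling shape as waitForTPUOp, with a stub condition
// instead of the TPU operations client.
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForDone(ctx context.Context, isDone func() (bool, error)) error {
	return wait.PollInfinite(1*time.Second, func() (bool, error) {
		select {
		case <-ctx.Done():
			// Stop polling (done=true) and surface the cancellation error.
			return true, ctx.Err()
		default:
		}
		return isDone()
	})
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	attempts := 0
	err := waitForDone(ctx, func() (bool, error) {
		attempts++
		return attempts >= 3, nil // pretend the operation finishes on the third poll
	})
	fmt.Println("attempts:", attempts, "err:", err)
}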
+func (g *Cloud) DeleteURLMap(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - mc := newUrlMapMetricContext("delete") - return mc.Observe(gce.c.UrlMaps().Delete(ctx, meta.GlobalKey(name))) + mc := newURLMapMetricContext("delete") + return mc.Observe(g.c.UrlMaps().Delete(ctx, meta.GlobalKey(name))) } -// ListUrlMaps lists all UrlMaps in the project. -func (gce *GCECloud) ListUrlMaps() ([]*compute.UrlMap, error) { +// ListURLMaps lists all UrlMaps in the project. +func (g *Cloud) ListURLMaps() ([]*compute.UrlMap, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - mc := newUrlMapMetricContext("list") - v, err := gce.c.UrlMaps().List(ctx, filter.None) + mc := newURLMapMetricContext("list") + v, err := g.c.UrlMaps().List(ctx, filter.None) return v, mc.Observe(err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_util.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_util.go index 43c882be9043b..d66029a1df6c9 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_util.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_util.go @@ -24,17 +24,55 @@ import ( "regexp" "sort" "strings" + "sync" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock" "cloud.google.com/go/compute/metadata" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) +func fakeGCECloud(vals TestClusterValues) (*Cloud, error) { + gce := NewFakeGCECloud(vals) + + gce.AlphaFeatureGate = NewAlphaFeatureGate([]string{}) + gce.nodeInformerSynced = func() bool { return true } + + mockGCE := gce.c.(*cloud.MockGCE) + mockGCE.MockTargetPools.AddInstanceHook = mock.AddInstanceHook + mockGCE.MockTargetPools.RemoveInstanceHook = mock.RemoveInstanceHook + mockGCE.MockForwardingRules.InsertHook = mock.InsertFwdRuleHook + mockGCE.MockAddresses.InsertHook = mock.InsertAddressHook + mockGCE.MockAlphaAddresses.InsertHook = mock.InsertAlphaAddressHook + mockGCE.MockAlphaAddresses.X = mock.AddressAttributes{} + mockGCE.MockAddresses.X = mock.AddressAttributes{} + + mockGCE.MockInstanceGroups.X = mock.InstanceGroupAttributes{ + InstanceMap: make(map[meta.Key]map[string]*compute.InstanceWithNamedPorts), + Lock: &sync.Mutex{}, + } + mockGCE.MockInstanceGroups.AddInstancesHook = mock.AddInstancesHook + mockGCE.MockInstanceGroups.RemoveInstancesHook = mock.RemoveInstancesHook + mockGCE.MockInstanceGroups.ListInstancesHook = mock.ListInstancesHook + + mockGCE.MockRegionBackendServices.UpdateHook = mock.UpdateRegionBackendServiceHook + mockGCE.MockHealthChecks.UpdateHook = mock.UpdateHealthCheckHook + mockGCE.MockFirewalls.UpdateHook = mock.UpdateFirewallHook + + keyGA := meta.GlobalKey("key-ga") + mockGCE.MockZones.Objects[*keyGA] = &cloud.MockZonesObj{ + Obj: &compute.Zone{Name: vals.ZoneName, Region: gce.getRegionLink(vals.Region)}, + } + + return gce, nil +} + type gceInstance struct { Zone string Name string @@ -50,7 +88,7 @@ var ( } ) -var providerIdRE = regexp.MustCompile(`^` + ProviderName + `://([^/]+)/([^/]+)/([^/]+)$`) +var providerIDRE = regexp.MustCompile(`^` + ProviderName + `://([^/]+)/([^/]+)/([^/]+)$`) func getProjectAndZone() (string, string, error) { result, err := metadata.Get("instance/zone") @@ -69,10 +107,10 @@ func getProjectAndZone() (string, string, 
error) { return projectID, zone, nil } -func (gce *GCECloud) raiseFirewallChangeNeededEvent(svc *v1.Service, cmd string) { +func (g *Cloud) raiseFirewallChangeNeededEvent(svc *v1.Service, cmd string) { msg := fmt.Sprintf("Firewall change required by network admin: `%v`", cmd) - if gce.eventRecorder != nil && svc != nil { - gce.eventRecorder.Event(svc, v1.EventTypeNormal, "LoadBalancerManualChange", msg) + if g.eventRecorder != nil && svc != nil { + g.eventRecorder.Event(svc, v1.EventTypeNormal, "LoadBalancerManualChange", msg) } } @@ -82,13 +120,13 @@ func FirewallToGCloudCreateCmd(fw *compute.Firewall, projectID string) string { return fmt.Sprintf("gcloud compute firewall-rules create %v --network %v %v", fw.Name, getNameFromLink(fw.Network), args) } -// FirewallToGCloudCreateCmd generates a gcloud command to update a firewall to specified params +// FirewallToGCloudUpdateCmd generates a gcloud command to update a firewall to specified params func FirewallToGCloudUpdateCmd(fw *compute.Firewall, projectID string) string { args := firewallToGcloudArgs(fw, projectID) return fmt.Sprintf("gcloud compute firewall-rules update %v %v", fw.Name, args) } -// FirewallToGCloudCreateCmd generates a gcloud command to delete a firewall to specified params +// FirewallToGCloudDeleteCmd generates a gcloud command to delete a firewall to specified params func FirewallToGCloudDeleteCmd(fwName, projectID string) string { return fmt.Sprintf("gcloud compute firewall-rules delete %v --project %v", fwName, projectID) } @@ -138,11 +176,6 @@ func mapNodeNameToInstanceName(nodeName types.NodeName) string { return string(nodeName) } -// mapInstanceToNodeName maps a GCE Instance to a k8s NodeName -func mapInstanceToNodeName(instance *compute.Instance) types.NodeName { - return types.NodeName(instance.Name) -} - // GetGCERegion returns region of the gce zone. Zone names // are of the form: ${region-name}-${ix}. // For example, "us-central1-b" has a region of "us-central1". @@ -172,7 +205,7 @@ func isInUsedByError(err error) bool { // A providerID is build out of '${ProviderName}://${project-id}/${zone}/${instance-name}' // See cloudprovider.GetInstanceProviderID. 
func splitProviderID(providerID string) (project, zone, instance string, err error) { - matches := providerIdRE.FindStringSubmatch(providerID) + matches := providerIDRE.FindStringSubmatch(providerID) if len(matches) != 4 { return "", "", "", errors.New("error splitting providerID") } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_zones.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_zones.go index 503ca348f484c..3e4bb059bd648 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_zones.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_zones.go @@ -23,7 +23,7 @@ import ( compute "google.golang.org/api/compute/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" ) @@ -33,17 +33,17 @@ func newZonesMetricContext(request, region string) *metricContext { } // GetZone creates a cloudprovider.Zone of the current zone and region -func (gce *GCECloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { +func (g *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { return cloudprovider.Zone{ - FailureDomain: gce.localZone, - Region: gce.region, + FailureDomain: g.localZone, + Region: g.region, }, nil } // GetZoneByProviderID implements Zones.GetZoneByProviderID // This is particularly useful in external cloud providers where the kubelet // does not initialize node data. -func (gce *GCECloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) { +func (g *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) { _, zone, _, err := splitProviderID(providerID) if err != nil { return cloudprovider.Zone{}, err @@ -58,9 +58,9 @@ func (gce *GCECloud) GetZoneByProviderID(ctx context.Context, providerID string) // GetZoneByNodeName implements Zones.GetZoneByNodeName // This is particularly useful in external cloud providers where the kubelet // does not initialize node data. 
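Note on splitProviderID above: provider IDs have the form ${ProviderName}://${project-id}/${zone}/${instance-name}, and the renamed providerIDRE captures the three path segments. A standalone sketch of the same parsing, with the provider name hard-coded to "gce" for illustration:

// Sketch only: mirrors splitProviderID's regex parsing with a hard-coded
// "gce" provider name for illustration.
package main

import (
	"errors"
	"fmt"
	"regexp"
)

var providerIDRE = regexp.MustCompile(`^gce://([^/]+)/([^/]+)/([^/]+)$`)

// splitProviderID returns the project, zone and instance encoded in a
// providerID, or an error if the ID does not match the expected form.
func splitProviderID(providerID string) (project, zone, instance string, err error) {
	matches := providerIDRE.FindStringSubmatch(providerID)
	if len(matches) != 4 {
		return "", "", "", errors.New("error splitting providerID")
	}
	return matches[1], matches[2], matches[3], nil
}

func main() {
	fmt.Println(splitProviderID("gce://my-project/us-central1-b/my-node"))
	fmt.Println(splitProviderID("aws:///not-a-gce-id"))
}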
-func (gce *GCECloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) { +func (g *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) { instanceName := mapNodeNameToInstanceName(nodeName) - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { return cloudprovider.Zone{}, err } @@ -72,18 +72,18 @@ func (gce *GCECloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeN } // ListZonesInRegion returns all zones in a GCP region -func (gce *GCECloud) ListZonesInRegion(region string) ([]*compute.Zone, error) { +func (g *Cloud) ListZonesInRegion(region string) ([]*compute.Zone, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newZonesMetricContext("list", region) - list, err := gce.c.Zones().List(ctx, filter.Regexp("region", gce.getRegionLink(region))) + list, err := g.c.Zones().List(ctx, filter.Regexp("region", g.getRegionLink(region))) if err != nil { return nil, mc.Observe(err) } return list, mc.Observe(err) } -func (gce *GCECloud) getRegionLink(region string) string { - return gce.service.BasePath + strings.Join([]string{gce.projectID, "regions", region}, "/") +func (g *Cloud) getRegionLink(region string) string { + return g.service.BasePath + strings.Join([]string{g.projectID, "regions", region}, "/") } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/support.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/support.go index 7861e08acbc76..e6c471855fe64 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/support.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/support.go @@ -25,7 +25,7 @@ import ( // gceProjectRouter sends requests to the appropriate project ID. type gceProjectRouter struct { - gce *GCECloud + gce *Cloud } // ProjectID returns the project ID to be used for the given operation. @@ -40,7 +40,7 @@ func (r *gceProjectRouter) ProjectID(ctx context.Context, version meta.Version, // gceRateLimiter implements cloud.RateLimiter. type gceRateLimiter struct { - gce *GCECloud + gce *Cloud } // Accept blocks until the operation can be performed. @@ -63,10 +63,10 @@ func (l *gceRateLimiter) Accept(ctx context.Context, key *cloud.RateLimitKey) er return nil } -// CreateGCECloudWithCloud is a helper function to create an instance of GCECloud with the +// CreateGCECloudWithCloud is a helper function to create an instance of Cloud with the // given Cloud interface implementation. Typical usage is to use cloud.NewMockGCE to get a // handle to a mock Cloud instance and then use that for testing. 
-func CreateGCECloudWithCloud(config *CloudConfig, c cloud.Cloud) (*GCECloud, error) { +func CreateGCECloudWithCloud(config *CloudConfig, c cloud.Cloud) (*Cloud, error) { gceCloud, err := CreateGCECloud(config) if err == nil { gceCloud.c = c diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go index 2f4bb09119ab1..e8434bd2a0c77 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go @@ -57,6 +57,7 @@ func init() { prometheus.MustRegister(getTokenFailCounter) } +// AltTokenSource is the structure holding the data for the functionality needed to generates tokens type AltTokenSource struct { oauthClient *http.Client tokenURL string @@ -64,6 +65,7 @@ type AltTokenSource struct { throttle flowcontrol.RateLimiter } +// Token returns a token which may be used for authentication func (a *AltTokenSource) Token() (*oauth2.Token, error) { a.throttle.Accept() getTokenCounter.Inc() @@ -100,6 +102,7 @@ func (a *AltTokenSource) token() (*oauth2.Token, error) { }, nil } +// NewAltTokenSource constructs a new alternate token source for generating tokens. func NewAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource { client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource("")) a := &AltTokenSource{ diff --git a/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions b/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions index c45988ec9b88d..31aa4d0b50440 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions +++ b/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions @@ -11,8 +11,356 @@ "ForbiddenPrefixes": [ "k8s.io/kubernetes/pkg/client/unversioned/testclient" ] + }, + { + "SelectorRegexp": "k8s[.]io/(api/|apimachinery/|apiextensions-apiserver/|apiserver/)", + "AllowedPrefixes": [ + "k8s.io/api/apps/v1", + "k8s.io/api/apps/v1beta1", + "k8s.io/api/authentication/v1", + "k8s.io/api/authorization/v1beta1", + "k8s.io/api/autoscaling/v1", + "k8s.io/api/autoscaling/v2beta1", + "k8s.io/api/autoscaling/v2beta2", + "k8s.io/api/batch/v1", + "k8s.io/api/batch/v1beta1", + "k8s.io/api/certificates/v1beta1", + "k8s.io/api/core/v1", + "k8s.io/api/coordination/v1beta1", + "k8s.io/api/extensions/v1beta1", + "k8s.io/api/policy/v1beta1", + "k8s.io/api/rbac/v1", + "k8s.io/api/storage/v1", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake", + "k8s.io/apimachinery/pkg/api/equality", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/meta", + "k8s.io/apimachinery/pkg/api/meta/testrestmapper", + "k8s.io/apimachinery/pkg/api/resource", + "k8s.io/apimachinery/pkg/apis/config", + "k8s.io/apimachinery/pkg/apis/config/v1alpha1", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "k8s.io/apimachinery/pkg/conversion", + "k8s.io/apimachinery/pkg/fields", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/runtime/serializer", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/clock", + "k8s.io/apimachinery/pkg/util/diff", + "k8s.io/apimachinery/pkg/util/errors", + "k8s.io/apimachinery/pkg/util/intstr", + 
"k8s.io/apimachinery/pkg/util/json", + "k8s.io/apimachinery/pkg/util/rand", + "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/sets", + "k8s.io/apimachinery/pkg/util/strategicpatch", + "k8s.io/apimachinery/pkg/util/uuid", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/apimachinery/pkg/util/version", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/apiserver/pkg/apis/config", + "k8s.io/apiserver/pkg/apis/config/v1alpha1", + "k8s.io/apiserver/pkg/authentication/serviceaccount", + "k8s.io/apiserver/pkg/storage/names", + "k8s.io/apiserver/pkg/util/feature", + "k8s.io/apiextensions-apiserver/pkg/features", + "k8s.io/apimachinery/pkg/api/validation", + "k8s.io/apimachinery/pkg/apis/meta/internalversion", + "k8s.io/apimachinery/pkg/selection", + "k8s.io/apimachinery/pkg/util/validation", + "k8s.io/apimachinery/pkg/util/validation/field", + "k8s.io/apiserver/pkg/authentication/authenticator", + "k8s.io/apiserver/pkg/authentication/user", + "k8s.io/apiserver/pkg/features", + "k8s.io/apiserver/pkg/registry/generic", + "k8s.io/apimachinery/pkg/version", + "k8s.io/api/imagepolicy/v1alpha1", + "k8s.io/apiserver/pkg/admission", + "k8s.io/apiserver/pkg/storage", + "k8s.io/api/batch/v2alpha1", + "k8s.io/apiserver/pkg/registry/rest", + "k8s.io/apimachinery/pkg/util/initialization", + "k8s.io/api/scheduling/v1alpha1", + "k8s.io/api/admissionregistration/v1beta1", + "k8s.io/api/authorization/v1", + "k8s.io/api/settings/v1alpha1", + "k8s.io/api/admission/v1beta1", + "k8s.io/api/networking/v1", + "k8s.io/api/admissionregistration/v1alpha1" + ] + }, + { + "SelectorRegexp": "github[.]com/", + "AllowedPrefixes": [ + "github.com/Azure/go-autorest/autorest/to", + "github.com/cloudflare/cfssl/config", + "github.com/cloudflare/cfssl/helpers", + "github.com/cloudflare/cfssl/signer", + "github.com/cloudflare/cfssl/signer/local", + "github.com/davecgh/go-spew/spew", + "github.com/evanphx/json-patch", + "github.com/golang/groupcache/lru", + "github.com/prometheus/client_golang/prometheus", + "github.com/robfig/cron", + "github.com/spf13/pflag", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/require", + "github.com/docker/distribution/reference", + "github.com/google/gofuzz" + ] + }, + { + "SelectorRegexp": "k8s[.]io/client-go/", + "AllowedPrefixes": [ + "k8s.io/client-go/discovery", + "k8s.io/client-go/dynamic", + "k8s.io/client-go/informers", + "k8s.io/client-go/informers/apps/v1", + "k8s.io/client-go/informers/apps/v1beta1", + "k8s.io/client-go/informers/autoscaling/v1", + "k8s.io/client-go/informers/batch/v1", + "k8s.io/client-go/informers/certificates/v1beta1", + "k8s.io/client-go/informers/core/v1", + "k8s.io/client-go/informers/extensions/v1beta1", + "k8s.io/client-go/informers/policy/v1beta1", + "k8s.io/client-go/informers/rbac/v1", + "k8s.io/client-go/informers/storage/v1", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/fake", + "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/kubernetes/typed/apps/v1", + "k8s.io/client-go/kubernetes/typed/authentication/v1", + "k8s.io/client-go/kubernetes/typed/autoscaling/v1", + "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", + "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/kubernetes/typed/policy/v1beta1", + "k8s.io/client-go/kubernetes/typed/rbac/v1", + "k8s.io/client-go/listers/apps/v1", + "k8s.io/client-go/listers/apps/v1beta1", + 
"k8s.io/client-go/listers/autoscaling/v1", + "k8s.io/client-go/listers/batch/v1", + "k8s.io/client-go/listers/certificates/v1beta1", + "k8s.io/client-go/listers/core/v1", + "k8s.io/client-go/listers/coordination/v1beta1", + "k8s.io/client-go/listers/extensions/v1beta1", + "k8s.io/client-go/listers/policy/v1beta1", + "k8s.io/client-go/listers/rbac/v1", + "k8s.io/client-go/listers/storage/v1", + "k8s.io/client-go/rest", + "k8s.io/client-go/scale", + "k8s.io/client-go/scale/fake", + "k8s.io/client-go/testing", + "k8s.io/client-go/tools/bootstrap/token/api", + "k8s.io/client-go/tools/cache", + "k8s.io/client-go/tools/leaderelection/resourcelock", + "k8s.io/client-go/tools/record", + "k8s.io/client-go/tools/reference", + "k8s.io/client-go/tools/watch", + "k8s.io/client-go/util/cert", + "k8s.io/client-go/util/flowcontrol", + "k8s.io/client-go/util/integer", + "k8s.io/client-go/util/retry", + "k8s.io/client-go/util/testing", + "k8s.io/client-go/util/workqueue" + ] + }, + { + "SelectorRegexp": "k8s[.]io/kubernetes/pkg", + "AllowedPrefixes": [ + "k8s.io/kubernetes/pkg/api/legacyscheme", + "k8s.io/kubernetes/pkg/api/testapi", + "k8s.io/kubernetes/pkg/api/v1/endpoints", + "k8s.io/kubernetes/pkg/api/v1/node", + "k8s.io/kubernetes/pkg/api/v1/pod", + "k8s.io/kubernetes/pkg/apis/apps/install", + "k8s.io/kubernetes/pkg/apis/apps/v1", + "k8s.io/kubernetes/pkg/apis/authentication/install", + "k8s.io/kubernetes/pkg/apis/authorization/install", + "k8s.io/kubernetes/pkg/apis/autoscaling", + "k8s.io/kubernetes/pkg/apis/autoscaling/install", + "k8s.io/kubernetes/pkg/apis/batch/install", + "k8s.io/kubernetes/pkg/apis/certificates/install", + "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", + "k8s.io/kubernetes/pkg/apis/core", + "k8s.io/kubernetes/pkg/apis/core/helper", + "k8s.io/kubernetes/pkg/apis/core/install", + "k8s.io/kubernetes/pkg/apis/core/v1", + "k8s.io/kubernetes/pkg/apis/core/v1/helper", + "k8s.io/kubernetes/pkg/apis/core/validation", + "k8s.io/kubernetes/pkg/apis/extensions", + "k8s.io/kubernetes/pkg/apis/extensions/install", + "k8s.io/kubernetes/pkg/apis/policy/install", + "k8s.io/kubernetes/pkg/apis/rbac/install", + "k8s.io/kubernetes/pkg/apis/settings/install", + "k8s.io/kubernetes/pkg/apis/storage/install", + "k8s.io/kubernetes/pkg/client/unversioned", + "k8s.io/kubernetes/pkg/client/unversioned/testclient", + "k8s.io/kubernetes/pkg/cloudprovider", + "k8s.io/kubernetes/pkg/cloudprovider/providers/fake", + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", + "k8s.io/kubernetes/pkg/controller", + "k8s.io/kubernetes/pkg/controller/apis/config", + "k8s.io/kubernetes/pkg/controller/apis/config/scheme", + "k8s.io/kubernetes/pkg/controller/apis/config/v1alpha1", + "k8s.io/kubernetes/pkg/controller/bootstrap", + "k8s.io/kubernetes/pkg/controller/certificates", + "k8s.io/kubernetes/pkg/controller/certificates/approver", + "k8s.io/kubernetes/pkg/controller/certificates/cleaner", + "k8s.io/kubernetes/pkg/controller/certificates/signer", + "k8s.io/kubernetes/pkg/controller/cloud", + "k8s.io/kubernetes/pkg/controller/clusterroleaggregation", + "k8s.io/kubernetes/pkg/controller/cronjob", + "k8s.io/kubernetes/pkg/controller/daemon", + "k8s.io/kubernetes/pkg/controller/daemon/util", + "k8s.io/kubernetes/pkg/controller/deployment", + "k8s.io/kubernetes/pkg/controller/deployment/util", + "k8s.io/kubernetes/pkg/controller/disruption", + "k8s.io/kubernetes/pkg/controller/endpoint", + "k8s.io/kubernetes/pkg/controller/garbagecollector", + "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly", + 
"k8s.io/kubernetes/pkg/controller/history", + "k8s.io/kubernetes/pkg/controller/job", + "k8s.io/kubernetes/pkg/controller/namespace", + "k8s.io/kubernetes/pkg/controller/namespace/deletion", + "k8s.io/kubernetes/pkg/controller/nodeipam", + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam", + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset", + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync", + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test", + "k8s.io/kubernetes/pkg/controller/nodelifecycle", + "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler", + "k8s.io/kubernetes/pkg/controller/podautoscaler", + "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", + "k8s.io/kubernetes/pkg/controller/podgc", + "k8s.io/kubernetes/pkg/controller/replicaset", + "k8s.io/kubernetes/pkg/controller/replicaset/options", + "k8s.io/kubernetes/pkg/controller/replication", + "k8s.io/kubernetes/pkg/controller/resourcequota", + "k8s.io/kubernetes/pkg/controller/route", + "k8s.io/kubernetes/pkg/controller/service", + "k8s.io/kubernetes/pkg/controller/serviceaccount", + "k8s.io/kubernetes/pkg/controller/statefulset", + "k8s.io/kubernetes/pkg/controller/testutil", + "k8s.io/kubernetes/pkg/controller/ttl", + "k8s.io/kubernetes/pkg/controller/ttlafterfinished", + "k8s.io/kubernetes/pkg/controller/util/node", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/util", + "k8s.io/kubernetes/pkg/controller/volume/events", + "k8s.io/kubernetes/pkg/controller/volume/expand", + "k8s.io/kubernetes/pkg/controller/volume/expand/cache", + "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", + "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics", + "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options", + "k8s.io/kubernetes/pkg/controller/volume/pvcprotection", + "k8s.io/kubernetes/pkg/controller/volume/pvprotection", + "k8s.io/kubernetes/pkg/features", + "k8s.io/kubernetes/pkg/kubectl/scheme", + "k8s.io/kubernetes/pkg/kubelet/apis", + "k8s.io/kubernetes/pkg/kubelet/events", + "k8s.io/kubernetes/pkg/kubelet/types", + "k8s.io/kubernetes/pkg/kubelet/util/format", + "k8s.io/kubernetes/pkg/quota", + "k8s.io/kubernetes/pkg/quota/evaluator/core", + "k8s.io/kubernetes/pkg/quota/generic", + "k8s.io/kubernetes/pkg/quota/install", + "k8s.io/kubernetes/pkg/registry/core/secret", + "k8s.io/kubernetes/pkg/scheduler/algorithm", + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", + "k8s.io/kubernetes/pkg/scheduler/cache", + "k8s.io/kubernetes/pkg/securitycontext", + "k8s.io/kubernetes/pkg/serviceaccount", + "k8s.io/kubernetes/pkg/util/goroutinemap", + "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", + "k8s.io/kubernetes/pkg/util/hash", + "k8s.io/kubernetes/pkg/util/labels", + "k8s.io/kubernetes/pkg/util/metrics", + "k8s.io/kubernetes/pkg/util/mount", + "k8s.io/kubernetes/pkg/util/node", + "k8s.io/kubernetes/pkg/util/reflector/prometheus", + "k8s.io/kubernetes/pkg/util/slice", + "k8s.io/kubernetes/pkg/util/strings", + "k8s.io/kubernetes/pkg/util/system", + "k8s.io/kubernetes/pkg/util/taints", + 
"k8s.io/kubernetes/pkg/util/workqueue/prometheus", + "k8s.io/kubernetes/pkg/volume", + "k8s.io/kubernetes/pkg/volume/testing", + "k8s.io/kubernetes/pkg/volume/util", + "k8s.io/kubernetes/pkg/volume/util/operationexecutor", + "k8s.io/kubernetes/pkg/volume/util/recyclerclient", + "k8s.io/kubernetes/pkg/volume/util/types", + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler", + "k8s.io/kubernetes/pkg/api/service", + "k8s.io/kubernetes/pkg/api/v1/service", + "k8s.io/kubernetes/pkg/apis/networking", + "k8s.io/kubernetes/pkg/apis/policy", + "k8s.io/kubernetes/pkg/apis/scheduling", + "k8s.io/kubernetes/pkg/capabilities", + "k8s.io/kubernetes/pkg/master/ports", + "k8s.io/kubernetes/pkg/scheduler/api", + "k8s.io/kubernetes/pkg/scheduler/util", + "k8s.io/kubernetes/pkg/security/apparmor", + "k8s.io/kubernetes/pkg/util/file", + "k8s.io/kubernetes/pkg/util/net/sets", + "k8s.io/kubernetes/pkg/util/parsers", + "k8s.io/kubernetes/pkg/fieldpath", + "k8s.io/kubernetes/pkg/kubeapiserver/admission/util", + "k8s.io/kubernetes/pkg/scheduler/volumebinder", + "k8s.io/kubernetes/pkg/scheduler/internal/cache", + "k8s.io/kubernetes/pkg/util/nsenter", + "k8s.io/kubernetes/pkg/util/resizefs", + "k8s.io/kubernetes/pkg/util/version", + "k8s.io/kubernetes/pkg/apis/apps", + "k8s.io/kubernetes/pkg/version", + "k8s.io/kubernetes/pkg/util/io" + ] + }, + { + "SelectorRegexp": "k8s[.]io/(metrics/|utils/|csi-api/|heapster/|kube-controller-manager/)", + "AllowedPrefixes": [ + "k8s.io/csi-api/pkg/apis/csi/v1alpha1", + "k8s.io/csi-api/pkg/client/clientset/versioned", + "k8s.io/heapster/metrics/api/v1/types", + "k8s.io/kube-controller-manager/config/v1alpha1", + "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2", + "k8s.io/metrics/pkg/apis/external_metrics/v1beta1", + "k8s.io/metrics/pkg/apis/metrics/v1alpha1", + "k8s.io/metrics/pkg/apis/metrics/v1beta1", + "k8s.io/metrics/pkg/client/clientset/versioned/fake", + "k8s.io/metrics/pkg/client/clientset/versioned/scheme", + "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1", + "k8s.io/metrics/pkg/client/custom_metrics", + "k8s.io/metrics/pkg/client/custom_metrics/fake", + "k8s.io/metrics/pkg/client/external_metrics", + "k8s.io/metrics/pkg/client/external_metrics/fake", + "k8s.io/utils/pointer", + "k8s.io/utils/exec" + ] + }, + { + "SelectorRegexp": "golang[.]org/", + "AllowedPrefixes": [ + "golang.org/x/time/rate", + "golang.org/x/sys/unix", + "golang.org/x/oauth2", + "google.golang.org/api/compute/v1", + "google.golang.org/api/googleapi", + "google.golang.org/api/compute/v0.alpha", + "google.golang.org/api/container/v1", + "google.golang.org/api/compute/v0.beta", + "google.golang.org/api/tpu/v1" + ] } - ] } - diff --git a/vendor/k8s.io/kubernetes/pkg/controller/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/controller/BUILD.bazel index 1f4a24aeb3a51..58ca5018acf9d 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/controller/BUILD.bazel @@ -13,7 +13,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/controller", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", @@ -44,12 +43,13 @@ go_library( "//vendor/k8s.io/client-go/tools/watch:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", + 
"//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", "//vendor/k8s.io/kubernetes/pkg/api/v1/pod:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core/validation:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/scheduler/algorithm:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/scheduler/api:go_default_library", "//vendor/k8s.io/kubernetes/pkg/serviceaccount:go_default_library", "//vendor/k8s.io/kubernetes/pkg/util/hash:go_default_library", "//vendor/k8s.io/kubernetes/pkg/util/taints:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go index bcf2182d0905e..caac5649d2f77 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go @@ -38,10 +38,12 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/serviceaccount" - "github.com/golang/glog" + "k8s.io/klog" ) // ControllerClientBuilder allows you to get clients and configs for controllers +// Please note a copy also exists in staging/src/k8s.io/cloud-provider/cloud.go +// TODO: Extract this into a separate controller utilities repo (issues/68947) type ControllerClientBuilder interface { Config(name string) (*restclient.Config, error) ConfigOrDie(name string) *restclient.Config @@ -63,7 +65,7 @@ func (b SimpleControllerClientBuilder) Config(name string) (*restclient.Config, func (b SimpleControllerClientBuilder) ConfigOrDie(name string) *restclient.Config { clientConfig, err := b.Config(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return clientConfig } @@ -79,7 +81,7 @@ func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interface { client, err := b.Client(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return client } @@ -144,15 +146,15 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro } validConfig, valid, err := b.getAuthenticatedConfig(sa, string(secret.Data[v1.ServiceAccountTokenKey])) if err != nil { - glog.Warningf("error validating API token for %s/%s in secret %s: %v", sa.Name, sa.Namespace, secret.Name, err) + klog.Warningf("error validating API token for %s/%s in secret %s: %v", sa.Name, sa.Namespace, secret.Name, err) // continue watching for good tokens return false, nil } if !valid { - glog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Name, sa.Namespace) + klog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Name, sa.Namespace) // try to delete the secret containing the invalid token if err := b.CoreClient.Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { - glog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Name, sa.Namespace, err) + klog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Name, sa.Namespace, err) } // continue watching for good tokens return false, nil @@ -206,14 +208,14 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, tokenReview := &v1authenticationapi.TokenReview{Spec: 
v1authenticationapi.TokenReviewSpec{Token: token}} if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(tokenReview); err == nil { if !tokenResult.Status.Authenticated { - glog.Warningf("Token for %s/%s did not authenticate correctly", sa.Name, sa.Namespace) + klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Name, sa.Namespace) return nil, false, nil } if tokenResult.Status.User.Username != username { - glog.Warningf("Token for %s/%s authenticated as unexpected username: %s", sa.Name, sa.Namespace, tokenResult.Status.User.Username) + klog.Warningf("Token for %s/%s authenticated as unexpected username: %s", sa.Name, sa.Namespace, tokenResult.Status.User.Username) return nil, false, nil } - glog.V(4).Infof("Verified credential for %s/%s", sa.Name, sa.Namespace) + klog.V(4).Infof("Verified credential for %s/%s", sa.Name, sa.Namespace) return clientConfig, true, nil } @@ -227,7 +229,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, } err = client.Get().AbsPath("/apis").Do().Error() if apierrors.IsUnauthorized(err) { - glog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Name, sa.Namespace, err) + klog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Name, sa.Namespace, err) return nil, false, nil } @@ -237,7 +239,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, func (b SAControllerClientBuilder) ConfigOrDie(name string) *restclient.Config { clientConfig, err := b.Config(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return clientConfig } @@ -253,7 +255,7 @@ func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, err func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface { client, err := b.Client(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return client } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go index 6cf2ac189464d..f63afaca6f359 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go @@ -20,7 +20,6 @@ import ( "fmt" "sync" - "github.com/golang/glog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -28,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" ) type BaseControllerRefManager struct { @@ -223,7 +223,7 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error { // ReleasePod sends a patch to free the pod from the control of the controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. 
func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error { - glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", + klog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pod.UID) err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch)) @@ -345,7 +345,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) er // ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error { - glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", + klog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID) err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch)) @@ -480,7 +480,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history // ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. 
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error { - glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s", + klog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s", history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID) err := m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(deleteOwnerRefPatch)) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go index 394407b28b541..6ccc32aed0f8b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -47,11 +47,11 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" _ "k8s.io/kubernetes/pkg/apis/core/install" "k8s.io/kubernetes/pkg/apis/core/validation" - "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" hashutil "k8s.io/kubernetes/pkg/util/hash" taintutils "k8s.io/kubernetes/pkg/util/taints" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -90,7 +90,7 @@ var UpdateTaintBackoff = wait.Backoff{ } var ShutdownTaint = &v1.Taint{ - Key: algorithm.TaintNodeShutdown, + Key: schedulerapi.TaintNodeShutdown, Effect: v1.TaintEffectNoSchedule, } @@ -170,7 +170,7 @@ func (r *ControllerExpectations) GetExpectations(controllerKey string) (*Control func (r *ControllerExpectations) DeleteExpectations(controllerKey string) { if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { if err := r.Delete(exp); err != nil { - glog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err) + klog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err) } } } @@ -181,24 +181,24 @@ func (r *ControllerExpectations) DeleteExpectations(controllerKey string) { func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool { if exp, exists, err := r.GetExpectations(controllerKey); exists { if exp.Fulfilled() { - glog.V(4).Infof("Controller expectations fulfilled %#v", exp) + klog.V(4).Infof("Controller expectations fulfilled %#v", exp) return true } else if exp.isExpired() { - glog.V(4).Infof("Controller expectations expired %#v", exp) + klog.V(4).Infof("Controller expectations expired %#v", exp) return true } else { - glog.V(4).Infof("Controller still waiting on expectations %#v", exp) + klog.V(4).Infof("Controller still waiting on expectations %#v", exp) return false } } else if err != nil { - glog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err) + klog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err) } else { // When a new controller is created, it doesn't have expectations. // When it doesn't see expected watch events for > TTL, the expectations expire. // - In this case it wakes up, creates/deletes controllees, and sets expectations again. // When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire. // - In this case it continues without setting expectations till it needs to create/delete controllees. 
- glog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey) + klog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey) } // Trigger a sync if we either encountered and error (which shouldn't happen since we're // getting from local store) or this controller hasn't established expectations. @@ -215,7 +215,7 @@ func (exp *ControlleeExpectations) isExpired() bool { // SetExpectations registers new expectations for the given controller. Forgets existing expectations. func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error { exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: clock.RealClock{}.Now()} - glog.V(4).Infof("Setting expectations %#v", exp) + klog.V(4).Infof("Setting expectations %#v", exp) return r.Add(exp) } @@ -232,7 +232,7 @@ func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, de if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { exp.Add(int64(-add), int64(-del)) // The expectations might've been modified since the update on the previous line. - glog.V(4).Infof("Lowered expectations %#v", exp) + klog.V(4).Infof("Lowered expectations %#v", exp) } } @@ -241,7 +241,7 @@ func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, de if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { exp.Add(int64(add), int64(del)) // The expectations might've been modified since the update on the previous line. - glog.V(4).Infof("Raised expectations %#v", exp) + klog.V(4).Infof("Raised expectations %#v", exp) } } @@ -340,13 +340,13 @@ func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, delete defer u.uidStoreLock.Unlock() if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 { - glog.Errorf("Clobbering existing delete keys: %+v", existing) + klog.Errorf("Clobbering existing delete keys: %+v", existing) } expectedUIDs := sets.NewString() for _, k := range deletedKeys { expectedUIDs.Insert(k) } - glog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys) + klog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys) if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil { return err } @@ -360,7 +360,7 @@ func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey st uids := u.GetUIDs(rcKey) if uids != nil && uids.Has(deleteKey) { - glog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey) + klog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey) u.ControllerExpectationsInterface.DeletionObserved(rcKey) uids.Delete(deleteKey) } @@ -375,7 +375,7 @@ func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) { u.ControllerExpectationsInterface.DeleteExpectations(rcKey) if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists { if err := u.uidStore.Delete(uidExp); err != nil { - glog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err) + klog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err) } } } @@ -581,10 +581,10 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT } else { accessor, err := meta.Accessor(object) if err != nil { - glog.Errorf("parentObject does not have ObjectMeta, %v", err) + klog.Errorf("parentObject does not have ObjectMeta, %v", err) return 
nil } - glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) + klog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name) } return nil @@ -595,7 +595,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime if err != nil { return fmt.Errorf("object does not have ObjectMeta, %v", err) } - glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) + klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil && !apierrors.IsNotFound(err) { r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) return fmt.Errorf("unable to delete pods: %v", err) @@ -806,7 +806,7 @@ func FilterActivePods(pods []*v1.Pod) []*v1.Pod { if IsPodActive(p) { result = append(result, p) } else { - glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", + klog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp) } } @@ -1024,14 +1024,14 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n // indicating that the controller identified by controllerName is waiting for syncs, followed by // either a successful or failed sync. func WaitForCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool { - glog.Infof("Waiting for caches to sync for %s controller", controllerName) + klog.Infof("Waiting for caches to sync for %s controller", controllerName) if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { utilruntime.HandleError(fmt.Errorf("Unable to sync caches for %s controller", controllerName)) return false } - glog.Infof("Caches are synced for %s controller", controllerName) + klog.Infof("Caches are synced for %s controller", controllerName) return true } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go deleted file mode 100644 index 42640c46a9f38..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go +++ /dev/null @@ -1,888 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "fmt" - "math" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/glog" - - apps "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - intstrutil "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" - "k8s.io/client-go/util/integer" - "k8s.io/kubernetes/pkg/controller" - labelsutil "k8s.io/kubernetes/pkg/util/labels" -) - -const ( - // RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence - RevisionAnnotation = "deployment.kubernetes.io/revision" - // RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment. - RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history" - // DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation - // in its replica sets. Helps in separating scaling events from the rollout process and for - // determining if the new replica set for a deployment is really saturated. - DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas" - // MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which - // is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their - // proportions in case the deployment has surge replicas. - MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas" - - // RollbackRevisionNotFound is not found rollback event reason - RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound" - // RollbackTemplateUnchanged is the template unchanged rollback event reason - RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged" - // RollbackDone is the done rollback event reason - RollbackDone = "DeploymentRollback" - // Reasons for deployment conditions - // - // Progressing: - // - // ReplicaSetUpdatedReason is added in a deployment when one of its replica sets is updated as part - // of the rollout process. - ReplicaSetUpdatedReason = "ReplicaSetUpdated" - // FailedRSCreateReason is added in a deployment when it cannot create a new replica set. - FailedRSCreateReason = "ReplicaSetCreateError" - // NewReplicaSetReason is added in a deployment when it creates a new replica set. - NewReplicaSetReason = "NewReplicaSetCreated" - // FoundNewRSReason is added in a deployment when it adopts an existing replica set. - FoundNewRSReason = "FoundNewReplicaSet" - // NewRSAvailableReason is added in a deployment when its newest replica set is made available - // ie. the number of new pods that have passed readiness checks and run for at least minReadySeconds - // is at least the minimum available pods that need to run for the deployment. - NewRSAvailableReason = "NewReplicaSetAvailable" - // TimedOutReason is added in a deployment when its newest replica set fails to show any progress - // within the given deadline (progressDeadlineSeconds). - TimedOutReason = "ProgressDeadlineExceeded" - // PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be - // estimated once a deployment is paused. - PausedDeployReason = "DeploymentPaused" - // ResumedDeployReason is added in a deployment when it is resumed. 
Useful for not failing accidentally - // deployments that paused amidst a rollout and are bounded by a deadline. - ResumedDeployReason = "DeploymentResumed" - // - // Available: - // - // MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available. - MinimumReplicasAvailable = "MinimumReplicasAvailable" - // MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas - // available. - MinimumReplicasUnavailable = "MinimumReplicasUnavailable" -) - -// NewDeploymentCondition creates a new deployment condition. -func NewDeploymentCondition(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *apps.DeploymentCondition { - return &apps.DeploymentCondition{ - Type: condType, - Status: status, - LastUpdateTime: metav1.Now(), - LastTransitionTime: metav1.Now(), - Reason: reason, - Message: message, - } -} - -// GetDeploymentCondition returns the condition with the provided type. -func GetDeploymentCondition(status apps.DeploymentStatus, condType apps.DeploymentConditionType) *apps.DeploymentCondition { - for i := range status.Conditions { - c := status.Conditions[i] - if c.Type == condType { - return &c - } - } - return nil -} - -// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that -// we are about to add already exists and has the same status and reason then we are not going to update. -func SetDeploymentCondition(status *apps.DeploymentStatus, condition apps.DeploymentCondition) { - currentCond := GetDeploymentCondition(*status, condition.Type) - if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason { - return - } - // Do not update lastTransitionTime if the status of the condition doesn't change. - if currentCond != nil && currentCond.Status == condition.Status { - condition.LastTransitionTime = currentCond.LastTransitionTime - } - newConditions := filterOutCondition(status.Conditions, condition.Type) - status.Conditions = append(newConditions, condition) -} - -// RemoveDeploymentCondition removes the deployment condition with the provided type. -func RemoveDeploymentCondition(status *apps.DeploymentStatus, condType apps.DeploymentConditionType) { - status.Conditions = filterOutCondition(status.Conditions, condType) -} - -// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type. -func filterOutCondition(conditions []apps.DeploymentCondition, condType apps.DeploymentConditionType) []apps.DeploymentCondition { - var newConditions []apps.DeploymentCondition - for _, c := range conditions { - if c.Type == condType { - continue - } - newConditions = append(newConditions, c) - } - return newConditions -} - -// ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition. -// Useful for promoting replica set failure conditions into deployments. -func ReplicaSetToDeploymentCondition(cond apps.ReplicaSetCondition) apps.DeploymentCondition { - return apps.DeploymentCondition{ - Type: apps.DeploymentConditionType(cond.Type), - Status: cond.Status, - LastTransitionTime: cond.LastTransitionTime, - LastUpdateTime: cond.LastTransitionTime, - Reason: cond.Reason, - Message: cond.Message, - } -} - -// SetDeploymentRevision updates the revision for a deployment. 
-func SetDeploymentRevision(deployment *apps.Deployment, revision string) bool { - updated := false - - if deployment.Annotations == nil { - deployment.Annotations = make(map[string]string) - } - if deployment.Annotations[RevisionAnnotation] != revision { - deployment.Annotations[RevisionAnnotation] = revision - updated = true - } - - return updated -} - -// MaxRevision finds the highest revision in the replica sets -func MaxRevision(allRSs []*apps.ReplicaSet) int64 { - max := int64(0) - for _, rs := range allRSs { - if v, err := Revision(rs); err != nil { - // Skip the replica sets when it failed to parse their revision information - glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs) - } else if v > max { - max = v - } - } - return max -} - -// LastRevision finds the second max revision number in all replica sets (the last revision) -func LastRevision(allRSs []*apps.ReplicaSet) int64 { - max, secMax := int64(0), int64(0) - for _, rs := range allRSs { - if v, err := Revision(rs); err != nil { - // Skip the replica sets when it failed to parse their revision information - glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs) - } else if v >= max { - secMax = max - max = v - } else if v > secMax { - secMax = v - } - } - return secMax -} - -// Revision returns the revision number of the input object. -func Revision(obj runtime.Object) (int64, error) { - acc, err := meta.Accessor(obj) - if err != nil { - return 0, err - } - v, ok := acc.GetAnnotations()[RevisionAnnotation] - if !ok { - return 0, nil - } - return strconv.ParseInt(v, 10, 64) -} - -// SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and -// copying required deployment annotations to it; it returns true if replica set's annotation is changed. -func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool) bool { - // First, copy deployment's annotations (except for apply and revision annotations) - annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS) - // Then, update replica set's revision annotation - if newRS.Annotations == nil { - newRS.Annotations = make(map[string]string) - } - oldRevision, ok := newRS.Annotations[RevisionAnnotation] - // The newRS's revision should be the greatest among all RSes. Usually, its revision number is newRevision (the max revision number - // of all old RSes + 1). However, it's possible that some of the old RSes are deleted after the newRS revision being updated, and - // newRevision becomes smaller than newRS's revision. We should only update newRS revision when it's smaller than newRevision. 
- - oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64) - if err != nil { - if oldRevision != "" { - glog.Warningf("Updating replica set revision OldRevision not int %s", err) - return false - } - //If the RS annotation is empty then initialise it to 0 - oldRevisionInt = 0 - } - newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64) - if err != nil { - glog.Warningf("Updating replica set revision NewRevision not int %s", err) - return false - } - if oldRevisionInt < newRevisionInt { - newRS.Annotations[RevisionAnnotation] = newRevision - annotationChanged = true - glog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision) - } - // If a revision annotation already existed and this replica set was updated with a new revision - // then that means we are rolling back to this replica set. We need to preserve the old revisions - // for historical information. - if ok && annotationChanged { - revisionHistoryAnnotation := newRS.Annotations[RevisionHistoryAnnotation] - oldRevisions := strings.Split(revisionHistoryAnnotation, ",") - if len(oldRevisions[0]) == 0 { - newRS.Annotations[RevisionHistoryAnnotation] = oldRevision - } else { - oldRevisions = append(oldRevisions, oldRevision) - newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",") - } - } - // If the new replica set is about to be created, we need to add replica annotations to it. - if !exists && SetReplicasAnnotations(newRS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) { - annotationChanged = true - } - return annotationChanged -} - -var annotationsToSkip = map[string]bool{ - v1.LastAppliedConfigAnnotation: true, - RevisionAnnotation: true, - RevisionHistoryAnnotation: true, - DesiredReplicasAnnotation: true, - MaxReplicasAnnotation: true, - apps.DeprecatedRollbackTo: true, -} - -// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key -// TODO: How to decide which annotations should / should not be copied? -// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 -func skipCopyAnnotation(key string) bool { - return annotationsToSkip[key] -} - -// copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations, -// and returns true if replica set's annotation is changed. -// Note that apply and revision annotations are not copied. -func copyDeploymentAnnotationsToReplicaSet(deployment *apps.Deployment, rs *apps.ReplicaSet) bool { - rsAnnotationsChanged := false - if rs.Annotations == nil { - rs.Annotations = make(map[string]string) - } - for k, v := range deployment.Annotations { - // newRS revision is updated automatically in getNewReplicaSet, and the deployment's revision number is then updated - // by copying its newRS revision number. We should not copy deployment's revision to its newRS, since the update of - // deployment revision number may fail (revision becomes stale) and the revision number in newRS is more reliable. - if skipCopyAnnotation(k) || rs.Annotations[k] == v { - continue - } - rs.Annotations[k] = v - rsAnnotationsChanged = true - } - return rsAnnotationsChanged -} - -// SetDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations. -// This action should be done if and only if the deployment is rolling back to this rs. -// Note that apply and revision annotations are not changed. 
-func SetDeploymentAnnotationsTo(deployment *apps.Deployment, rollbackToRS *apps.ReplicaSet) { - deployment.Annotations = getSkippedAnnotations(deployment.Annotations) - for k, v := range rollbackToRS.Annotations { - if !skipCopyAnnotation(k) { - deployment.Annotations[k] = v - } - } -} - -func getSkippedAnnotations(annotations map[string]string) map[string]string { - skippedAnnotations := make(map[string]string) - for k, v := range annotations { - if skipCopyAnnotation(k) { - skippedAnnotations[k] = v - } - } - return skippedAnnotations -} - -// FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active -// replica set. If there are more active replica sets, then we should proportionally scale them. -func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps.ReplicaSet { - if newRS == nil && len(oldRSs) == 0 { - return nil - } - - sort.Sort(sort.Reverse(controller.ReplicaSetsByCreationTimestamp(oldRSs))) - allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS)) - - switch len(allRSs) { - case 0: - // If there is no active replica set then we should return the newest. - if newRS != nil { - return newRS - } - return oldRSs[0] - case 1: - return allRSs[0] - default: - return nil - } -} - -// GetDesiredReplicasAnnotation returns the number of desired replicas -func GetDesiredReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) { - return getIntFromAnnotation(rs, DesiredReplicasAnnotation) -} - -func getMaxReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) { - return getIntFromAnnotation(rs, MaxReplicasAnnotation) -} - -func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, bool) { - annotationValue, ok := rs.Annotations[annotationKey] - if !ok { - return int32(0), false - } - intValue, err := strconv.Atoi(annotationValue) - if err != nil { - glog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name) - return int32(0), false - } - return int32(intValue), true -} - -// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations -func SetReplicasAnnotations(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool { - updated := false - if rs.Annotations == nil { - rs.Annotations = make(map[string]string) - } - desiredString := fmt.Sprintf("%d", desiredReplicas) - if hasString := rs.Annotations[DesiredReplicasAnnotation]; hasString != desiredString { - rs.Annotations[DesiredReplicasAnnotation] = desiredString - updated = true - } - maxString := fmt.Sprintf("%d", maxReplicas) - if hasString := rs.Annotations[MaxReplicasAnnotation]; hasString != maxString { - rs.Annotations[MaxReplicasAnnotation] = maxString - updated = true - } - return updated -} - -// AnnotationsNeedUpdate return true if ReplicasAnnotations need to be updated -func ReplicasAnnotationsNeedUpdate(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool { - if rs.Annotations == nil { - return true - } - desiredString := fmt.Sprintf("%d", desiredReplicas) - if hasString := rs.Annotations[DesiredReplicasAnnotation]; hasString != desiredString { - return true - } - maxString := fmt.Sprintf("%d", maxReplicas) - if hasString := rs.Annotations[MaxReplicasAnnotation]; hasString != maxString { - return true - } - return false -} - -// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take. 
-func MaxUnavailable(deployment apps.Deployment) int32 { - if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 { - return int32(0) - } - // Error caught by validation - _, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas)) - if maxUnavailable > *deployment.Spec.Replicas { - return *deployment.Spec.Replicas - } - return maxUnavailable -} - -// MinAvailable returns the minimum available pods of a given deployment -func MinAvailable(deployment *apps.Deployment) int32 { - if !IsRollingUpdate(deployment) { - return int32(0) - } - return *(deployment.Spec.Replicas) - MaxUnavailable(*deployment) -} - -// MaxSurge returns the maximum surge pods a rolling deployment can take. -func MaxSurge(deployment apps.Deployment) int32 { - if !IsRollingUpdate(&deployment) { - return int32(0) - } - // Error caught by validation - maxSurge, _, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas)) - return maxSurge -} - -// GetProportion will estimate the proportion for the provided replica set using 1. the current size -// of the parent deployment, 2. the replica count that needs be added on the replica sets of the -// deployment, and 3. the total replicas added in the replica sets of the deployment so far. -func GetProportion(rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { - if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded { - return int32(0) - } - - rsFraction := getReplicaSetFraction(*rs, d) - allowed := deploymentReplicasToAdd - deploymentReplicasAdded - - if deploymentReplicasToAdd > 0 { - // Use the minimum between the replica set fraction and the maximum allowed replicas - // when scaling up. This way we ensure we will not scale up more than the allowed - // replicas we can add. - return integer.Int32Min(rsFraction, allowed) - } - // Use the maximum between the replica set fraction and the maximum allowed replicas - // when scaling down. This way we ensure we will not scale down more than the allowed - // replicas we can remove. - return integer.Int32Max(rsFraction, allowed) -} - -// getReplicaSetFraction estimates the fraction of replicas a replica set can have in -// 1. a scaling event during a rollout or 2. when scaling a paused deployment. -func getReplicaSetFraction(rs apps.ReplicaSet, d apps.Deployment) int32 { - // If we are scaling down to zero then the fraction of this replica set is its whole size (negative) - if *(d.Spec.Replicas) == int32(0) { - return -*(rs.Spec.Replicas) - } - - deploymentReplicas := *(d.Spec.Replicas) + MaxSurge(d) - annotatedReplicas, ok := getMaxReplicasAnnotation(&rs) - if !ok { - // If we cannot find the annotation then fallback to the current deployment size. Note that this - // will not be an accurate proportion estimation in case other replica sets have different values - // which means that the deployment was scaled at some point but we at least will stay in limits - // due to the min-max comparisons in getProportion. - annotatedReplicas = d.Status.Replicas - } - - // We should never proportionally scale up from zero which means rs.spec.replicas and annotatedReplicas - // will never be zero here. 
- newRSsize := (float64(*(rs.Spec.Replicas) * deploymentReplicas)) / float64(annotatedReplicas) - return integer.RoundToInt32(newRSsize) - *(rs.Spec.Replicas) -} - -// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface. -// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -// The third returned value is the new replica set, and it may be nil if it doesn't exist yet. -func GetAllReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, *apps.ReplicaSet, error) { - rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) - if err != nil { - return nil, nil, nil, err - } - oldRSes, allOldRSes := FindOldReplicaSets(deployment, rsList) - newRS := FindNewReplicaSet(deployment, rsList) - return oldRSes, allOldRSes, newRS, nil -} - -// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface. -// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -func GetOldReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, error) { - rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) - if err != nil { - return nil, nil, err - } - oldRSes, allOldRSes := FindOldReplicaSets(deployment, rsList) - return oldRSes, allOldRSes, nil -} - -// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. -// Returns nil if the new replica set doesn't exist yet. -func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) { - rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) - if err != nil { - return nil, err - } - return FindNewReplicaSet(deployment, rsList), nil -} - -// RsListFromClient returns an rsListFunc that wraps the given client. -func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc { - return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) { - rsList, err := c.ReplicaSets(namespace).List(options) - if err != nil { - return nil, err - } - var ret []*apps.ReplicaSet - for i := range rsList.Items { - ret = append(ret, &rsList.Items[i]) - } - return ret, err - } -} - -// TODO: switch this to full namespacers -type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error) -type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error) - -// ListReplicaSets returns a slice of RSes the given deployment targets. -// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), -// because only the controller itself should do that. -// However, it does filter out anything whose ControllerRef doesn't match. -func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) { - // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector - // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830. 
- namespace := deployment.Namespace - selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - if err != nil { - return nil, err - } - options := metav1.ListOptions{LabelSelector: selector.String()} - all, err := getRSList(namespace, options) - if err != nil { - return nil, err - } - // Only include those whose ControllerRef matches the Deployment. - owned := make([]*apps.ReplicaSet, 0, len(all)) - for _, rs := range all { - if metav1.IsControlledBy(rs, deployment) { - owned = append(owned, rs) - } - } - return owned, nil -} - -// ListPods returns a list of pods the given deployment targets. -// This needs a list of ReplicaSets for the Deployment, -// which can be found with ListReplicaSets(). -// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), -// because only the controller itself should do that. -// However, it does filter out anything whose ControllerRef doesn't match. -func ListPods(deployment *apps.Deployment, rsList []*apps.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) { - namespace := deployment.Namespace - selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - if err != nil { - return nil, err - } - options := metav1.ListOptions{LabelSelector: selector.String()} - all, err := getPodList(namespace, options) - if err != nil { - return all, err - } - // Only include those whose ControllerRef points to a ReplicaSet that is in - // turn owned by this Deployment. - rsMap := make(map[types.UID]bool, len(rsList)) - for _, rs := range rsList { - rsMap[rs.UID] = true - } - owned := &v1.PodList{Items: make([]v1.Pod, 0, len(all.Items))} - for i := range all.Items { - pod := &all.Items[i] - controllerRef := metav1.GetControllerOf(pod) - if controllerRef != nil && rsMap[controllerRef.UID] { - owned.Items = append(owned.Items, *pod) - } - } - return owned, nil -} - -// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] -// We ignore pod-template-hash because: -// 1. The hash result would be different upon podTemplateSpec API changes -// (e.g. the addition of a new field will cause the hash code to change) -// 2. The deployment template won't have hash labels -func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool { - t1Copy := template1.DeepCopy() - t2Copy := template2.DeepCopy() - // Remove hash labels from template.Labels before comparing - delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey) - delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey) - return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) -} - -// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). -func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet { - sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList)) - for i := range rsList { - if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) { - // In rare cases, such as after cluster upgrades, Deployment may end up with - // having more than one new ReplicaSets that have the same template as its template, - // see https://github.com/kubernetes/kubernetes/issues/40415 - // We deterministically choose the oldest new ReplicaSet. - return rsList[i] - } - } - // new ReplicaSet does not exist. - return nil -} - -// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes. 
-// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -func FindOldReplicaSets(deployment *apps.Deployment, rsList []*apps.ReplicaSet) ([]*apps.ReplicaSet, []*apps.ReplicaSet) { - var requiredRSs []*apps.ReplicaSet - var allRSs []*apps.ReplicaSet - newRS := FindNewReplicaSet(deployment, rsList) - for _, rs := range rsList { - // Filter out new replica set - if newRS != nil && rs.UID == newRS.UID { - continue - } - allRSs = append(allRSs, rs) - if *(rs.Spec.Replicas) != 0 { - requiredRSs = append(requiredRSs, rs) - } - } - return requiredRSs, allRSs -} - -// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment. -func SetFromReplicaSetTemplate(deployment *apps.Deployment, template v1.PodTemplateSpec) *apps.Deployment { - deployment.Spec.Template.ObjectMeta = template.ObjectMeta - deployment.Spec.Template.Spec = template.Spec - deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel( - deployment.Spec.Template.ObjectMeta.Labels, - apps.DefaultDeploymentUniqueLabelKey) - return deployment -} - -// GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets. -func GetReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { - totalReplicas := int32(0) - for _, rs := range replicaSets { - if rs != nil { - totalReplicas += *(rs.Spec.Replicas) - } - } - return totalReplicas -} - -// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets. -func GetActualReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { - totalActualReplicas := int32(0) - for _, rs := range replicaSets { - if rs != nil { - totalActualReplicas += rs.Status.Replicas - } - } - return totalActualReplicas -} - -// GetReadyReplicaCountForReplicaSets returns the number of ready pods corresponding to the given replica sets. -func GetReadyReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { - totalReadyReplicas := int32(0) - for _, rs := range replicaSets { - if rs != nil { - totalReadyReplicas += rs.Status.ReadyReplicas - } - } - return totalReadyReplicas -} - -// GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets. -func GetAvailableReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { - totalAvailableReplicas := int32(0) - for _, rs := range replicaSets { - if rs != nil { - totalAvailableReplicas += rs.Status.AvailableReplicas - } - } - return totalAvailableReplicas -} - -// IsRollingUpdate returns true if the strategy type is a rolling update. -func IsRollingUpdate(deployment *apps.Deployment) bool { - return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType -} - -// DeploymentComplete considers a deployment to be complete once all of its desired replicas -// are updated and available, and no old pods are running. -func DeploymentComplete(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool { - return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) && - newStatus.Replicas == *(deployment.Spec.Replicas) && - newStatus.AvailableReplicas == *(deployment.Spec.Replicas) && - newStatus.ObservedGeneration >= deployment.Generation -} - -// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the -// current with the new status of the deployment that the controller is observing. 
More specifically, -// when new pods are scaled up or become ready or available, or old pods are scaled down, then we -// consider the deployment is progressing. -func DeploymentProgressing(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool { - oldStatus := deployment.Status - - // Old replicas that need to be scaled down - oldStatusOldReplicas := oldStatus.Replicas - oldStatus.UpdatedReplicas - newStatusOldReplicas := newStatus.Replicas - newStatus.UpdatedReplicas - - return (newStatus.UpdatedReplicas > oldStatus.UpdatedReplicas) || - (newStatusOldReplicas < oldStatusOldReplicas) || - newStatus.ReadyReplicas > deployment.Status.ReadyReplicas || - newStatus.AvailableReplicas > deployment.Status.AvailableReplicas -} - -// used for unit testing -var nowFn = func() time.Time { return time.Now() } - -// DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress -// is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already -// exists. -func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool { - if !HasProgressDeadline(deployment) { - return false - } - - // Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress. - // If it's already set with a TimedOutReason reason, we have already timed out, no need to check - // again. - condition := GetDeploymentCondition(*newStatus, apps.DeploymentProgressing) - if condition == nil { - return false - } - // If the previous condition has been a successful rollout then we shouldn't try to - // estimate any progress. Scenario: - // - // * progressDeadlineSeconds is smaller than the difference between now and the time - // the last rollout finished in the past. - // * the creation of a new ReplicaSet triggers a resync of the Deployment prior to the - // cached copy of the Deployment getting updated with the status.condition that indicates - // the creation of the new ReplicaSet. - // - // The Deployment will be resynced and eventually its Progressing condition will catch - // up with the state of the world. - if condition.Reason == NewRSAvailableReason { - return false - } - if condition.Reason == TimedOutReason { - return true - } - - // Look at the difference in seconds between now and the last time we reported any - // progress or tried to create a replica set, or resumed a paused deployment and - // compare against progressDeadlineSeconds. - from := condition.LastUpdateTime - now := nowFn() - delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second - timedOut := from.Add(delta).Before(now) - - glog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now) - return timedOut -} - -// NewRSNewReplicas calculates the number of replicas a deployment's new RS should have. -// When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it. -// 1) The new RS is saturated: newRS's replicas == deployment's replicas -// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas -func NewRSNewReplicas(deployment *apps.Deployment, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) (int32, error) { - switch deployment.Spec.Strategy.Type { - case apps.RollingUpdateDeploymentStrategyType: - // Check if we can scale up. 
- maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true) - if err != nil { - return 0, err - } - // Find the total number of pods - currentPodCount := GetReplicaCountForReplicaSets(allRSs) - maxTotalPods := *(deployment.Spec.Replicas) + int32(maxSurge) - if currentPodCount >= maxTotalPods { - // Cannot scale up. - return *(newRS.Spec.Replicas), nil - } - // Scale up. - scaleUpCount := maxTotalPods - currentPodCount - // Do not exceed the number of desired replicas. - scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas)))) - return *(newRS.Spec.Replicas) + scaleUpCount, nil - case apps.RecreateDeploymentStrategyType: - return *(deployment.Spec.Replicas), nil - default: - return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type) - } -} - -// IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size. -// Both the deployment and the replica set have to believe this replica set can own all of the desired -// replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet -// need to be available. -func IsSaturated(deployment *apps.Deployment, rs *apps.ReplicaSet) bool { - if rs == nil { - return false - } - desiredString := rs.Annotations[DesiredReplicasAnnotation] - desired, err := strconv.Atoi(desiredString) - if err != nil { - return false - } - return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) && - int32(desired) == *(deployment.Spec.Replicas) && - rs.Status.AvailableReplicas == *(deployment.Spec.Replicas) -} - -// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. -// Returns error if polling timesout. -func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error { - // TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface. - return wait.PollImmediate(interval, timeout, func() (bool, error) { - deployment, err := getDeploymentFunc() - if err != nil { - return false, err - } - return deployment.Status.ObservedGeneration >= desiredGeneration, nil - }) -} - -// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one -// step. 
For example: -// -// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) -// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) -// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) -// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) -// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) -// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) -func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { - surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true) - if err != nil { - return 0, 0, err - } - unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false) - if err != nil { - return 0, 0, err - } - - if surge == 0 && unavailable == 0 { - // Validation should never allow the user to explicitly use zero values for both maxSurge - // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. - // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the - // theory that surge might not work due to quota. - unavailable = 1 - } - - return int32(surge), int32(unavailable), nil -} - -func HasProgressDeadline(d *apps.Deployment) bool { - return d.Spec.ProgressDeadlineSeconds != nil && *d.Spec.ProgressDeadlineSeconds != math.MaxInt32 -} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD.bazel index 9e749c3363b79..6829de334a115 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD.bazel @@ -13,8 +13,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/credentialprovider", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/docker/docker/api/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD.bazel index 7d1d9204ca632..422e1b016573a 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD.bazel @@ -11,7 +11,7 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/ecr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/credentialprovider:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go index d889cbf1fa8b9..89869e76eaa69 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go @@ -26,7 +26,8 @@ import ( "github.com/aws/aws-sdk-go/aws/request" 
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ecr" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/credentialprovider" ) @@ -46,7 +47,7 @@ func awsHandlerLogger(req *request.Request) { name = req.Operation.Name } - glog.V(3).Infof("AWS request: %s:%s in %s", service, name, *region) + klog.V(3).Infof("AWS request: %s:%s in %s", service, name, *region) } // An interface for testing purposes. @@ -100,7 +101,7 @@ func registryURL(region string) string { // This should be called only if using the AWS cloud provider. // This way, we avoid timeouts waiting for a non-existent provider. func RegisterCredentialsProvider(region string) { - glog.V(4).Infof("registering credentials provider for AWS region %q", region) + klog.V(4).Infof("registering credentials provider for AWS region %q", region) credentialprovider.RegisterCredentialProvider("aws-ecr-"+region, &lazyEcrProvider{ @@ -121,7 +122,7 @@ func (p *lazyEcrProvider) Enabled() bool { // provider only when we actually need it the first time. func (p *lazyEcrProvider) LazyProvide() *credentialprovider.DockerConfigEntry { if p.actualProvider == nil { - glog.V(2).Infof("Creating ecrProvider for %s", p.region) + klog.V(2).Infof("Creating ecrProvider for %s", p.region) p.actualProvider = &credentialprovider.CachingDockerConfigProvider{ Provider: newEcrProvider(p.region, nil), // Refresh credentials a little earlier than expiration time @@ -160,7 +161,7 @@ func newEcrProvider(region string, getter tokenGetter) *ecrProvider { // use ECR somehow? func (p *ecrProvider) Enabled() bool { if p.region == "" { - glog.Errorf("Called ecrProvider.Enabled() with no region set") + klog.Errorf("Called ecrProvider.Enabled() with no region set") return false } @@ -190,11 +191,11 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { params := &ecr.GetAuthorizationTokenInput{} output, err := p.getter.GetAuthorizationToken(params) if err != nil { - glog.Errorf("while requesting ECR authorization token %v", err) + klog.Errorf("while requesting ECR authorization token %v", err) return cfg } if output == nil { - glog.Errorf("Got back no ECR token") + klog.Errorf("Got back no ECR token") return cfg } @@ -203,7 +204,7 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { data.AuthorizationToken != nil { decodedToken, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken)) if err != nil { - glog.Errorf("while decoding token for endpoint %v %v", data.ProxyEndpoint, err) + klog.Errorf("while decoding token for endpoint %v %v", data.ProxyEndpoint, err) return cfg } parts := strings.SplitN(string(decodedToken), ":", 2) @@ -216,7 +217,7 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { Email: "not@val.id", } - glog.V(3).Infof("Adding credentials for user %s in %s", user, p.region) + klog.V(3).Infof("Adding credentials for user %s in %s", user, p.region) // Add our config entry for this region's registry URLs cfg[p.regionURL] = entry diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go index 433f28b15448f..a43e8c2b15d4a 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go @@ -27,7 +27,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // DockerConfigJson represents ~/.docker/config.json file info @@ -95,21 +95,21 @@ func ReadDockercfgFile(searchPaths 
[]string) (cfg DockerConfig, err error) { for _, configPath := range searchPaths { absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configFileName)) if err != nil { - glog.Errorf("while trying to canonicalize %s: %v", configPath, err) + klog.Errorf("while trying to canonicalize %s: %v", configPath, err) continue } - glog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation) + klog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation) contents, err := ioutil.ReadFile(absDockerConfigFileLocation) if os.IsNotExist(err) { continue } if err != nil { - glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) + klog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) continue } cfg, err := readDockerConfigFileFromBytes(contents) if err == nil { - glog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation) + klog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation) return cfg, nil } } @@ -125,18 +125,18 @@ func ReadDockerConfigJSONFile(searchPaths []string) (cfg DockerConfig, err error for _, configPath := range searchPaths { absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configJsonFileName)) if err != nil { - glog.Errorf("while trying to canonicalize %s: %v", configPath, err) + klog.Errorf("while trying to canonicalize %s: %v", configPath, err) continue } - glog.V(4).Infof("looking for %s at %s", configJsonFileName, absDockerConfigFileLocation) + klog.V(4).Infof("looking for %s at %s", configJsonFileName, absDockerConfigFileLocation) cfg, err = ReadSpecificDockerConfigJsonFile(absDockerConfigFileLocation) if err != nil { if !os.IsNotExist(err) { - glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) + klog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) } continue } - glog.V(4).Infof("found valid %s at %s", configJsonFileName, absDockerConfigFileLocation) + klog.V(4).Infof("found valid %s at %s", configJsonFileName, absDockerConfigFileLocation) return cfg, nil } return nil, fmt.Errorf("couldn't find valid %s after checking in %v", configJsonFileName, searchPaths) @@ -188,7 +188,7 @@ func ReadUrl(url string, client *http.Client, header *http.Header) (body []byte, defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - glog.V(2).Infof("body of failing http response: %v", resp.Body) + klog.V(2).Infof("body of failing http response: %v", resp.Body) return nil, &HttpError{ StatusCode: resp.StatusCode, Url: url, @@ -213,7 +213,7 @@ func ReadDockerConfigFileFromUrl(url string, client *http.Client, header *http.H func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) { if err = json.Unmarshal(contents, &cfg); err != nil { - glog.Errorf("while trying to parse blob %q: %v", contents, err) + klog.Errorf("while trying to parse blob %q: %v", contents, err) return nil, err } return @@ -222,7 +222,7 @@ func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error func readDockerConfigJsonFileFromBytes(contents []byte) (cfg DockerConfig, err error) { var cfgJson DockerConfigJson if err = json.Unmarshal(contents, &cfgJson); err != nil { - glog.Errorf("while trying to parse blob %q: %v", contents, err) + klog.Errorf("while trying to parse blob %q: %v", contents, err) return nil, err } cfg = cfgJson.Auths diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go index 
b269f474600f4..6f5fad5fc4783 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go @@ -23,9 +23,8 @@ import ( "sort" "strings" - "github.com/golang/glog" + "k8s.io/klog" - dockertypes "github.com/docker/docker/api/types" "k8s.io/apimachinery/pkg/util/sets" ) @@ -52,17 +51,39 @@ type lazyDockerKeyring struct { Providers []DockerConfigProvider } +// AuthConfig contains authorization information for connecting to a Registry +// This type mirrors "github.com/docker/docker/api/types.AuthConfig" +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} + // LazyAuthConfiguration wraps dockertypes.AuthConfig, potentially deferring its // binding. If Provider is non-nil, it will be used to obtain new credentials // by calling LazyProvide() on it. type LazyAuthConfiguration struct { - dockertypes.AuthConfig + AuthConfig Provider DockerConfigProvider } func DockerConfigEntryToLazyAuthConfiguration(ident DockerConfigEntry) LazyAuthConfiguration { return LazyAuthConfiguration{ - AuthConfig: dockertypes.AuthConfig{ + AuthConfig: AuthConfig{ Username: ident.Username, Password: ident.Password, Email: ident.Email, @@ -92,7 +113,7 @@ func (dk *BasicDockerKeyring) Add(cfg DockerConfig) { } parsed, err := url.Parse(value) if err != nil { - glog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err) + klog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err) continue } diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go index c817fefa2b7e3..5ea3a000e8bcf 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go @@ -21,7 +21,7 @@ import ( "sort" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // All registered credential providers. 
@@ -38,9 +38,9 @@ func RegisterCredentialProvider(name string, provider DockerConfigProvider) { defer providersMutex.Unlock() _, found := providers[name] if found { - glog.Fatalf("Credential provider %q was registered twice", name) + klog.Fatalf("Credential provider %q was registered twice", name) } - glog.V(4).Infof("Registered credential provider %q", name) + klog.V(4).Infof("Registered credential provider %q", name) providers[name] = provider } @@ -61,7 +61,7 @@ func NewDockerKeyring() DockerKeyring { for _, key := range stringKeys { provider := providers[key] if provider.Enabled() { - glog.V(4).Infof("Registering credential provider: %v", key) + klog.V(4).Infof("Registering credential provider: %v", key) keyring.Providers = append(keyring.Providers, provider) } } diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go index 419dc43e5dfdf..16b4e601a10a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go @@ -22,8 +22,7 @@ import ( "sync" "time" - dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" + "k8s.io/klog" ) // DockerConfigProvider is the interface that registered extensions implement @@ -40,14 +39,12 @@ type DockerConfigProvider interface { LazyProvide() *DockerConfigEntry } -func LazyProvide(creds LazyAuthConfiguration) dockertypes.AuthConfig { +func LazyProvide(creds LazyAuthConfiguration) AuthConfig { if creds.Provider != nil { entry := *creds.Provider.LazyProvide() return DockerConfigEntryToLazyAuthConfiguration(entry).AuthConfig - } else { - return creds.AuthConfig } - + return creds.AuthConfig } // A DockerConfigProvider that simply reads the .dockercfg file @@ -86,7 +83,7 @@ func (d *defaultDockerConfigProvider) Provide() DockerConfig { if cfg, err := ReadDockerConfigFile(); err == nil { return cfg } else if !os.IsNotExist(err) { - glog.V(4).Infof("Unable to parse Docker config file: %v", err) + klog.V(4).Infof("Unable to parse Docker config file: %v", err) } return DockerConfig{} } @@ -116,7 +113,7 @@ func (d *CachingDockerConfigProvider) Provide() DockerConfig { return d.cacheDockerConfig } - glog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) + klog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) d.cacheDockerConfig = d.Provider.Provide() d.expiration = time.Now().Add(d.Lifetime) return d.cacheDockerConfig diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index c03e93c6c83a6..cd35ecb704463 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -49,6 +49,7 @@ const ( // owner: @vishh // alpha: v1.5 // + // DEPRECATED - This feature is deprecated by Pod Priority and Preemption as of Kubernetes 1.13. // Ensures guaranteed scheduling of pods marked with a special pod annotation `scheduler.alpha.kubernetes.io/critical-pod` // and also prevents them from being evicted from a node. // Note: This feature is not supported for `BestEffort` pods. 
@@ -60,8 +61,8 @@ const ( // Enables support for Device Plugins DevicePlugins utilfeature.Feature = "DevicePlugins" - // owner: @gmarek - // alpha: v1.6 + // owner: @Huang-Wei + // beta: v1.13 // // Changes the logic behind evicting Pods from not ready Nodes // to take advantage of NoExecute Taints and Tolerations. @@ -139,6 +140,7 @@ const ( // owner: @jsafrane // GA: v1.12 // + // Note: This feature gate is unconditionally enabled in v1.13 and will be removed in v1.14. // Enable mount propagation of volumes. MountPropagation utilfeature.Feature = "MountPropagation" @@ -186,14 +188,13 @@ const ( MountContainers utilfeature.Feature = "MountContainers" // owner: @msau42 - // alpha: v1.9 + // GA: v1.13 // // Extend the default scheduler to be aware of PV topology and handle PV binding - // Before moving to beta, resolve Kubernetes issue #56180 VolumeScheduling utilfeature.Feature = "VolumeScheduling" // owner: @vladimirvivien - // beta: v1.10 + // GA: v1.13 // // Enable mount/attachment of Container Storage Interface (CSI) backed PVs CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume" @@ -216,6 +217,7 @@ const ( // owner: @screeley44 // alpha: v1.9 + // beta: v1.13 // // Enable Block volume support in containers. BlockVolume utilfeature.Feature = "BlockVolume" @@ -268,6 +270,14 @@ const ( // Enable ServiceAccountTokenVolumeProjection support in ProjectedVolumes. TokenRequestProjection utilfeature.Feature = "TokenRequestProjection" + // owner: @mikedanese + // alpha: v1.13 + // + // Migrate ServiceAccount volumes to use a projected volume consisting of a + // ServiceAccountTokenVolumeProjection. This feature adds new required flags + // to the API server. + BoundServiceAccountTokenVolume utilfeature.Feature = "BoundServiceAccountTokenVolume" + // owner: @Random-Liu // beta: v1.11 // @@ -275,7 +285,7 @@ const ( CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation" // owner: @verult - // beta: v1.10 + // GA: v1.13 // // Enables the regional PD feature on GCE. GCERegionalPersistentDisk utilfeature.Feature = "GCERegionalPersistentDisk" @@ -322,7 +332,7 @@ const ( VolumeSubpathEnvExpansion utilfeature.Feature = "VolumeSubpathEnvExpansion" // owner: @vikaschoudhary16 - // alpha: v1.11 + // GA: v1.13 // // // Enable probe based plugin watcher utility for discovering Kubelet plugins @@ -377,6 +387,12 @@ const ( // // Allow TTL controller to clean up Pods and Jobs after they finish. TTLAfterFinished utilfeature.Feature = "TTLAfterFinished" + + // owner: @dashpole + // alpha: v1.13 + // + // Enables the kubelet's pod resources grpc endpoint + KubeletPodResources utilfeature.Feature = "KubeletPodResources" ) func init() { @@ -387,12 +403,12 @@ func init() { // To add a new feature, define a key for it above and add it here. The features will be // available throughout Kubernetes binaries. 
var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ - AppArmor: {Default: true, PreRelease: utilfeature.Beta}, - DynamicKubeletConfig: {Default: true, PreRelease: utilfeature.Beta}, + AppArmor: {Default: true, PreRelease: utilfeature.Beta}, + DynamicKubeletConfig: {Default: true, PreRelease: utilfeature.Beta}, ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta}, ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha}, DevicePlugins: {Default: true, PreRelease: utilfeature.Beta}, - TaintBasedEvictions: {Default: false, PreRelease: utilfeature.Alpha}, + TaintBasedEvictions: {Default: true, PreRelease: utilfeature.Beta}, RotateKubeletServerCertificate: {Default: true, PreRelease: utilfeature.Beta}, RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta}, PersistentLocalVolumes: {Default: true, PreRelease: utilfeature.Beta}, @@ -413,12 +429,12 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS CPUCFSQuotaPeriod: {Default: false, PreRelease: utilfeature.Alpha}, ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, - VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta}, - CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta}, + VolumeScheduling: {Default: true, PreRelease: utilfeature.GA}, + CSIPersistentVolume: {Default: true, PreRelease: utilfeature.GA}, CSIDriverRegistry: {Default: false, PreRelease: utilfeature.Alpha}, CSINodeInfo: {Default: false, PreRelease: utilfeature.Alpha}, CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta}, - BlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, + BlockVolume: {Default: true, PreRelease: utilfeature.Beta}, StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.GA}, ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.GA}, @@ -427,14 +443,15 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS ScheduleDaemonSetPods: {Default: true, PreRelease: utilfeature.Beta}, TokenRequest: {Default: true, PreRelease: utilfeature.Beta}, TokenRequestProjection: {Default: true, PreRelease: utilfeature.Beta}, + BoundServiceAccountTokenVolume: {Default: false, PreRelease: utilfeature.Alpha}, CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta}, - GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta}, + GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.GA}, RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, PodReadinessGates: {Default: true, PreRelease: utilfeature.Beta}, VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha}, - KubeletPluginsWatcher: {Default: true, PreRelease: utilfeature.Beta}, + KubeletPluginsWatcher: {Default: true, PreRelease: utilfeature.GA}, ResourceQuotaScopeSelectors: {Default: true, PreRelease: utilfeature.Beta}, CSIBlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, RuntimeClass: {Default: false, PreRelease: utilfeature.Alpha}, @@ -443,20 +460,23 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS VolumeSnapshotDataSource: {Default: false, PreRelease: utilfeature.Alpha}, 
ProcMountType: {Default: false, PreRelease: utilfeature.Alpha}, TTLAfterFinished: {Default: false, PreRelease: utilfeature.Alpha}, + KubeletPodResources: {Default: false, PreRelease: utilfeature.Alpha}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: genericfeatures.StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, genericfeatures.AdvancedAuditing: {Default: true, PreRelease: utilfeature.GA}, + genericfeatures.DynamicAuditing: {Default: false, PreRelease: utilfeature.Alpha}, genericfeatures.APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, genericfeatures.Initializers: {Default: false, PreRelease: utilfeature.Alpha}, genericfeatures.APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, - genericfeatures.DryRun: {Default: false, PreRelease: utilfeature.Alpha}, + genericfeatures.DryRun: {Default: true, PreRelease: utilfeature.Beta}, // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: - apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, - apiextensionsfeatures.CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, + apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, + apiextensionsfeatures.CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, + apiextensionsfeatures.CustomResourceWebhookConversion: {Default: false, PreRelease: utilfeature.Alpha}, // features that enable backwards compatibility but are scheduled to be removed // ... diff --git a/vendor/k8s.io/kubernetes/pkg/generated/doc.go b/vendor/k8s.io/kubernetes/pkg/generated/doc.go deleted file mode 100644 index f17a557129647..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/generated/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// generated package is the destination for all generated files. Not all generated files are currently use this package -// but the plan is to move as much of them as possible to this package. 
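The feature-gate table above tracks the 1.13 promotions (TaintBasedEvictions and BlockVolume to beta; VolumeScheduling, CSIPersistentVolume, GCERegionalPersistentDisk and KubeletPluginsWatcher to GA; new alpha gates such as BoundServiceAccountTokenVolume and KubeletPodResources). Callers do not read this map directly: kube_features.go registers it with the shared DefaultFeatureGate in its init(), and components query the gate so that --feature-gates overrides are honored. A rough sketch of that lookup, assuming the 1.13-era k8s.io/apiserver/pkg/util/feature package referenced by this file:

```go
package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

func main() {
	// Components consult the gate rather than the defaults map, so any
	// --feature-gates override wired into DefaultFeatureGate is respected.
	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		fmt.Println("BlockVolume is on (beta, default true as of this change)")
	}
	if !utilfeature.DefaultFeatureGate.Enabled(features.BoundServiceAccountTokenVolume) {
		fmt.Println("BoundServiceAccountTokenVolume is off (new alpha gate)")
	}
}
```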
-package generated diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions b/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions index 79f6b5a9384d6..6cc204640d31d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions @@ -122,6 +122,7 @@ "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", "k8s.io/kubernetes/pkg/scheduler/api", "k8s.io/kubernetes/pkg/scheduler/cache", + "k8s.io/kubernetes/pkg/scheduler/internal/cache", "k8s.io/kubernetes/pkg/scheduler/util", "k8s.io/kubernetes/pkg/scheduler/volumebinder", "k8s.io/kubernetes/pkg/security/apparmor", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel index e19cd0266d2c4..b362c73ca856d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel @@ -4,69 +4,34 @@ go_library( name = "go_default_library", srcs = [ "apply.go", - "autoscale.go", - "clusterrolebinding.go", "conditions.go", - "configmap.go", - "deployment.go", "doc.go", - "env_file.go", - "generate.go", "history.go", "interfaces.go", - "namespace.go", - "pdb.go", - "priorityclass.go", - "quota.go", - "rolebinding.go", "rollback.go", "rolling_updater.go", "rollout_status.go", - "run.go", "scale.go", - "secret.go", - "secret_for_docker_registry.go", - "secret_for_tls.go", - "service.go", - "service_basic.go", - "serviceaccount.go", - "sorter.go", ], importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl", importpath = "k8s.io/kubernetes/pkg/kubectl", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/spf13/cobra:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/api/batch/v1beta1:go_default_library", - "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - "//vendor/k8s.io/api/rbac/v1:go_default_library", - "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", - "//vendor/k8s.io/api/scheduling/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", @@ -75,24 +40,13 @@ go_library( "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", - "//vendor/k8s.io/client-go/util/jsonpath:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/pod:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/v1/pod:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core/v1:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/controller/deployment/util:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/credentialprovider:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/apps:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/util:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/hash:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/util/slice:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/printers:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/printers/internalversion:go_default_library", - "//vendor/vbom.ml/util/sortorder:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go index 3c82aee808e65..78447d498cf06 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go @@ -135,7 +135,8 @@ func CreateApplyAnnotation(obj runtime.Object, codec runtime.Encoder) error { return setOriginalConfiguration(obj, modified) } -// Create the annotation used by kubectl apply only when createAnnotation is true +// CreateOrUpdateAnnotation creates the annotation used by +// kubectl apply only when createAnnotation is true // Otherwise, only update the annotation when it already exists func CreateOrUpdateAnnotation(createAnnotation bool, obj runtime.Object, codec runtime.Encoder) error { if createAnnotation { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/BUILD.bazel deleted file mode 100644 index ab88b5abbcb21..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "element.go", - "empty_element.go", - "error.go", - "list_element.go", - "map_element.go", - "primitive_element.go", - "type_element.go", - "visitor.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/apply", - importpath = "k8s.io/kubernetes/pkg/kubectl/apply", - 
visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/doc.go deleted file mode 100644 index 6bf3fcbedf1c9..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/doc.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apply - -// This package is used for creating and applying patches generated -// from a last recorded config, local config, remote object. -// Example usage for a test: -// -//fakeSchema := tst.Fake{Path: swaggerPath} -//s, err := fakeSchema.OpenAPISchema() -//Expect(err).To(BeNil()) -//resources, err := openapi.NewOpenAPIData(s) -//Expect(err).To(BeNil()) -//elementParser := parse.Factory{resources} -// -//obj, err := parser.CreateElement(recorded, local, remote) -//Expect(err).Should(Not(HaveOccurred())) -// -//merged, err := obj.Merge(strategy.Create(strategy.Options{})) -// diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/element.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/element.go deleted file mode 100644 index 8e7e14800f191..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/element.go +++ /dev/null @@ -1,423 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apply - -import ( - "fmt" -) - -// Element contains the record, local, and remote value for a field in an object -// and metadata about the field read from openapi. -// Calling Merge on an element will apply the passed in strategy to Element - -// e.g. either replacing the whole element with the local copy or merging each -// of the recorded, local and remote fields of the element. -type Element interface { - // FieldMeta specifies which merge strategy to use for this element - FieldMeta - - // Merge merges the recorded, local and remote values in the element using the Strategy - // provided as an argument. Calls the type specific method on the Strategy - following the - // "Accept" method from the "Visitor" pattern. - // e.g. Merge on a ListElement will call Strategy.MergeList(self) - // Returns the Result of the merged elements - Merge(Strategy) (Result, error) - - // HasRecorded returns true if the field was explicitly - // present in the recorded source. 
This is to differentiate between - // undefined and set to null - HasRecorded() bool - - // GetRecorded returns the field value from the recorded source of the object - GetRecorded() interface{} - - // HasLocal returns true if the field was explicitly - // present in the local source. This is to differentiate between - // undefined and set to null - HasLocal() bool - - // GetLocal returns the field value from the local source of the object - GetLocal() interface{} - - // HasRemote returns true if the field was explicitly - // present in the remote source. This is to differentiate between - // undefined and set to null - HasRemote() bool - - // GetRemote returns the field value from the remote source of the object - GetRemote() interface{} -} - -// FieldMeta defines the strategy used to apply a Patch for an element -type FieldMeta interface { - // GetFieldMergeType returns the type of merge strategy to use for this field - // maybe "merge", "replace" or "retainkeys" - // TODO: There maybe multiple strategies, so this may need to be a slice, map, or struct - // Address this in a follow up in the PR to introduce retainkeys strategy - GetFieldMergeType() string - - // GetFieldMergeKeys returns the merge key to use when the MergeType is "merge" and underlying type is a list - GetFieldMergeKeys() MergeKeys - - // GetFieldType returns the openapi field type - e.g. primitive, array, map, type, reference - GetFieldType() string -} - -// FieldMetaImpl implements FieldMeta -type FieldMetaImpl struct { - // MergeType is the type of merge strategy to use for this field - // maybe "merge", "replace" or "retainkeys" - MergeType string - - // MergeKeys are the merge keys to use when the MergeType is "merge" and underlying type is a list - MergeKeys MergeKeys - - // Type is the openapi type of the field - "list", "primitive", "map" - Type string - - // Name contains name of the field - Name string -} - -// GetFieldMergeType implements FieldMeta.GetFieldMergeType -func (s FieldMetaImpl) GetFieldMergeType() string { - return s.MergeType -} - -// GetFieldMergeKeys implements FieldMeta.GetFieldMergeKeys -func (s FieldMetaImpl) GetFieldMergeKeys() MergeKeys { - return s.MergeKeys -} - -// GetFieldType implements FieldMeta.GetFieldType -func (s FieldMetaImpl) GetFieldType() string { - return s.Type -} - -// MergeKeyValue records the value of the mergekey for an item in a list -type MergeKeyValue map[string]string - -// Equal returns true if the MergeKeyValues share the same value, -// representing the same item in a list -func (v MergeKeyValue) Equal(o MergeKeyValue) bool { - if len(v) != len(o) { - return false - } - - for key, v1 := range v { - if v2, found := o[key]; !found || v1 != v2 { - return false - } - } - - return true -} - -// MergeKeys is the set of fields on an object that uniquely identify -// and is used when merging lists to identify the "same" object -// independent of the ordering of the objects -type MergeKeys []string - -// GetMergeKeyValue parses the MergeKeyValue from an item in a list -func (mk MergeKeys) GetMergeKeyValue(i interface{}) (MergeKeyValue, error) { - result := MergeKeyValue{} - if len(mk) <= 0 { - return result, fmt.Errorf("merge key must have at least 1 value to merge") - } - m, ok := i.(map[string]interface{}) - if !ok { - return result, fmt.Errorf("cannot use mergekey %v for primitive item in list %v", mk, i) - } - for _, field := range mk { - if value, found := m[field]; !found { - result[field] = "" - } else { - result[field] = fmt.Sprintf("%v", value) - } - } - return 
result, nil -} - -type source int - -const ( - recorded source = iota - local - remote -) - -// CombinedPrimitiveSlice implements a slice of primitives -type CombinedPrimitiveSlice struct { - Items []*PrimitiveListItem -} - -// PrimitiveListItem represents a single value in a slice of primitives -type PrimitiveListItem struct { - // Value is the value of the primitive, should match recorded, local and remote - Value interface{} - - RawElementData -} - -// Contains returns true if the slice contains the l -func (s *CombinedPrimitiveSlice) lookup(l interface{}) *PrimitiveListItem { - val := fmt.Sprintf("%v", l) - for _, i := range s.Items { - if fmt.Sprintf("%v", i.Value) == val { - return i - } - } - return nil -} - -func (s *CombinedPrimitiveSlice) upsert(l interface{}) *PrimitiveListItem { - // Return the item if it exists - if item := s.lookup(l); item != nil { - return item - } - - // Otherwise create a new item and append to the list - item := &PrimitiveListItem{ - Value: l, - } - s.Items = append(s.Items, item) - return item -} - -// UpsertRecorded adds l to the slice. If there is already a value of l in the -// slice for either the local or remote, set on that value as the recorded value -// Otherwise append a new item to the list with the recorded value. -func (s *CombinedPrimitiveSlice) UpsertRecorded(l interface{}) { - v := s.upsert(l) - v.recorded = l - v.recordedSet = true -} - -// UpsertLocal adds l to the slice. If there is already a value of l in the -// slice for either the recorded or remote, set on that value as the local value -// Otherwise append a new item to the list with the local value. -func (s *CombinedPrimitiveSlice) UpsertLocal(l interface{}) { - v := s.upsert(l) - v.local = l - v.localSet = true -} - -// UpsertRemote adds l to the slice. If there is already a value of l in the -// slice for either the local or recorded, set on that value as the remote value -// Otherwise append a new item to the list with the remote value. -func (s *CombinedPrimitiveSlice) UpsertRemote(l interface{}) { - v := s.upsert(l) - v.remote = l - v.remoteSet = true -} - -// ListItem represents a single value in a slice of maps or types -type ListItem struct { - // KeyValue is the merge key value of the item - KeyValue MergeKeyValue - - // RawElementData contains the field values - RawElementData -} - -// CombinedMapSlice is a slice of maps or types with merge keys -type CombinedMapSlice struct { - Items []*ListItem -} - -// Lookup returns the ListItem matching the merge key, or nil if not found. -func (s *CombinedMapSlice) lookup(v MergeKeyValue) *ListItem { - for _, i := range s.Items { - if i.KeyValue.Equal(v) { - return i - } - } - return nil -} - -func (s *CombinedMapSlice) upsert(key MergeKeys, l interface{}) (*ListItem, error) { - // Get the identity of the item - val, err := key.GetMergeKeyValue(l) - if err != nil { - return nil, err - } - - // Return the item if it exists - if item := s.lookup(val); item != nil { - return item, nil - } - - // Otherwise create a new item and append to the list - item := &ListItem{ - KeyValue: val, - } - s.Items = append(s.Items, item) - return item, nil -} - -// UpsertRecorded adds l to the slice. If there is already a value of l sharing -// l's merge key in the slice for either the local or remote, set l the recorded value -// Otherwise append a new item to the list with the recorded value. 
-func (s *CombinedMapSlice) UpsertRecorded(key MergeKeys, l interface{}) error { - item, err := s.upsert(key, l) - if err != nil { - return err - } - item.SetRecorded(l) - return nil -} - -// UpsertLocal adds l to the slice. If there is already a value of l sharing -// l's merge key in the slice for either the recorded or remote, set l the local value -// Otherwise append a new item to the list with the local value. -func (s *CombinedMapSlice) UpsertLocal(key MergeKeys, l interface{}) error { - item, err := s.upsert(key, l) - if err != nil { - return err - } - item.SetLocal(l) - return nil -} - -// UpsertRemote adds l to the slice. If there is already a value of l sharing -// l's merge key in the slice for either the recorded or local, set l the remote value -// Otherwise append a new item to the list with the remote value. -func (s *CombinedMapSlice) UpsertRemote(key MergeKeys, l interface{}) error { - item, err := s.upsert(key, l) - if err != nil { - return err - } - item.SetRemote(l) - return nil -} - -// IsDrop returns true if the field represented by e should be dropped from the merged object -func IsDrop(e Element) bool { - // Specified in the last value recorded value and since deleted from the local - removed := e.HasRecorded() && !e.HasLocal() - - // Specified locally and explicitly set to null - setToNil := e.HasLocal() && e.GetLocal() == nil - - return removed || setToNil -} - -// IsAdd returns true if the field represented by e should have the local value directly -// added to the merged object instead of merging the recorded, local and remote values -func IsAdd(e Element) bool { - // If it isn't already present in the remote value and is present in the local value - return e.HasLocal() && !e.HasRemote() -} - -// NewRawElementData returns a new RawElementData, setting IsSet to true for -// non-nil values, and leaving IsSet false for nil values. -// Note: use this only when you want a nil-value to be considered "unspecified" -// (ignore) and not "unset" (deleted). 
-func NewRawElementData(recorded, local, remote interface{}) RawElementData { - data := RawElementData{} - if recorded != nil { - data.SetRecorded(recorded) - } - if local != nil { - data.SetLocal(local) - } - if remote != nil { - data.SetRemote(remote) - } - return data -} - -// RawElementData contains the raw recorded, local and remote data -// and metadata about whethere or not each was set -type RawElementData struct { - HasElementData - - recorded interface{} - local interface{} - remote interface{} -} - -// SetRecorded sets the recorded value -func (b *RawElementData) SetRecorded(value interface{}) { - b.recorded = value - b.recordedSet = true -} - -// SetLocal sets the local value -func (b *RawElementData) SetLocal(value interface{}) { - b.local = value - b.localSet = true -} - -// SetRemote sets the remote value -func (b *RawElementData) SetRemote(value interface{}) { - b.remote = value - b.remoteSet = true -} - -// GetRecorded implements Element.GetRecorded -func (b RawElementData) GetRecorded() interface{} { - // https://golang.org/doc/faq#nil_error - if b.recorded == nil { - return nil - } - return b.recorded -} - -// GetLocal implements Element.GetLocal -func (b RawElementData) GetLocal() interface{} { - // https://golang.org/doc/faq#nil_error - if b.local == nil { - return nil - } - return b.local -} - -// GetRemote implements Element.GetRemote -func (b RawElementData) GetRemote() interface{} { - // https://golang.org/doc/faq#nil_error - if b.remote == nil { - return nil - } - return b.remote -} - -// HasElementData contains whether a field was set in the recorded, local and remote sources -type HasElementData struct { - recordedSet bool - localSet bool - remoteSet bool -} - -// HasRecorded implements Element.HasRecorded -func (e HasElementData) HasRecorded() bool { - return e.recordedSet -} - -// HasLocal implements Element.HasLocal -func (e HasElementData) HasLocal() bool { - return e.localSet -} - -// HasRemote implements Element.HasRemote -func (e HasElementData) HasRemote() bool { - return e.remoteSet -} - -// ConflictDetector defines the capability to detect conflict. An element can examine remote/recorded value to detect conflict. -type ConflictDetector interface { - HasConflict() error -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/empty_element.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/empty_element.go deleted file mode 100644 index 1ca9090b5542c..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/empty_element.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
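element.go above (removed from this vendor tree, though still present upstream at this Kubernetes version) collates recorded, local and remote list items by merge key rather than by index. A small sketch of that collation, assuming the pre-removal k8s.io/kubernetes/pkg/kubectl/apply package is importable; the "name" merge key and the nginx images are illustrative only:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubectl/apply"
)

func main() {
	// Containers in a pod spec merge by their "name" field.
	key := apply.MergeKeys{"name"}

	local := map[string]interface{}{"name": "app", "image": "nginx:1.15"}
	remote := map[string]interface{}{"name": "app", "image": "nginx:1.14"}

	s := &apply.CombinedMapSlice{}
	if err := s.UpsertLocal(key, local); err != nil {
		panic(err)
	}
	// The remote item shares the merge key, so it is collated onto the
	// same ListItem instead of being appended as a second entry.
	if err := s.UpsertRemote(key, remote); err != nil {
		panic(err)
	}

	fmt.Println(len(s.Items))        // 1
	fmt.Println(s.Items[0].KeyValue) // map[name:app]
}
```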
-*/ - -package apply - -// EmptyElement is a placeholder for when no value is set for a field so its type is unknown -type EmptyElement struct { - // FieldMetaImpl contains metadata about the field from openapi - FieldMetaImpl -} - -// Merge implements Element.Merge -func (e EmptyElement) Merge(v Strategy) (Result, error) { - return v.MergeEmpty(e) -} - -// IsAdd implements Element.IsAdd -func (e EmptyElement) IsAdd() bool { - return false -} - -// IsDelete implements Element.IsDelete -func (e EmptyElement) IsDelete() bool { - return false -} - -// GetRecorded implements Element.GetRecorded -func (e EmptyElement) GetRecorded() interface{} { - return nil -} - -// GetLocal implements Element.GetLocal -func (e EmptyElement) GetLocal() interface{} { - return nil -} - -// GetRemote implements Element.GetRemote -func (e EmptyElement) GetRemote() interface{} { - return nil -} - -// HasRecorded implements Element.HasRecorded -func (e EmptyElement) HasRecorded() bool { - return false -} - -// HasLocal implements Element.HasLocal -func (e EmptyElement) HasLocal() bool { - return false -} - -// HasRemote implements Element.IsAdd -func (e EmptyElement) HasRemote() bool { - return false -} - -var _ Element = &EmptyElement{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/list_element.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/list_element.go deleted file mode 100644 index b271d38821459..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/list_element.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apply - -// ListElement contains the recorded, local and remote values for a field -// of type list -type ListElement struct { - // FieldMetaImpl contains metadata about the field from openapi - FieldMetaImpl - - ListElementData - - // Values contains the combined recorded-local-remote value of each item in the list - // Present for lists that can be merged only. Contains the items - // from each of the 3 lists merged into single Elements using - // the merge-key. 
- Values []Element -} - -// Merge implements Element.Merge -func (e ListElement) Merge(v Strategy) (Result, error) { - return v.MergeList(e) -} - -var _ Element = &ListElement{} - -// ListElementData contains the recorded, local and remote data for a list -type ListElementData struct { - RawElementData -} - -// GetRecordedList returns the Recorded value as a list -func (e ListElementData) GetRecordedList() []interface{} { - return sliceCast(e.recorded) -} - -// GetLocalList returns the Local value as a list -func (e ListElementData) GetLocalList() []interface{} { - return sliceCast(e.local) -} - -// GetRemoteList returns the Remote value as a list -func (e ListElementData) GetRemoteList() []interface{} { - return sliceCast(e.remote) -} - -// sliceCast casts i to a slice if it is non-nil, otherwise returns nil -func sliceCast(i interface{}) []interface{} { - if i == nil { - return nil - } - return i.([]interface{}) -} - -// HasConflict returns ConflictError if fields in recorded and remote of ListElement conflict -func (e ListElement) HasConflict() error { - for _, item := range e.Values { - if item, ok := item.(ConflictDetector); ok { - if err := item.HasConflict(); err != nil { - return err - } - } - } - return nil -} - -var _ ConflictDetector = &ListElement{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/map_element.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/map_element.go deleted file mode 100644 index c24ac40199e89..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/map_element.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apply - -// MapElement contains the recorded, local and remote values for a field -// of type map -type MapElement struct { - // FieldMetaImpl contains metadata about the field from openapi - FieldMetaImpl - - // MapElementData contains the value a field was set to - MapElementData - - // Values contains the combined recorded-local-remote value of each item in the map - // Values contains the values in mapElement. 
Element must contain - // a Name matching its key in Values - Values map[string]Element -} - -// Merge implements Element.Merge -func (e MapElement) Merge(v Strategy) (Result, error) { - return v.MergeMap(e) -} - -// GetValues implements Element.GetValues -func (e MapElement) GetValues() map[string]Element { - return e.Values -} - -var _ Element = &MapElement{} - -// MapElementData contains the recorded, local and remote data for a map or type -type MapElementData struct { - RawElementData -} - -// GetRecordedMap returns the Recorded value as a map -func (e MapElementData) GetRecordedMap() map[string]interface{} { - return mapCast(e.recorded) -} - -// GetLocalMap returns the Local value as a map -func (e MapElementData) GetLocalMap() map[string]interface{} { - return mapCast(e.local) -} - -// GetRemoteMap returns the Remote value as a map -func (e MapElementData) GetRemoteMap() map[string]interface{} { - return mapCast(e.remote) -} - -// mapCast casts i to a map if it is non-nil, otherwise returns nil -func mapCast(i interface{}) map[string]interface{} { - if i == nil { - return nil - } - return i.(map[string]interface{}) -} - -// HasConflict returns ConflictError if some elements in map conflict. -func (e MapElement) HasConflict() error { - for _, item := range e.GetValues() { - if item, ok := item.(ConflictDetector); ok { - if err := item.HasConflict(); err != nil { - return err - } - } - } - return nil -} - -var _ ConflictDetector = &MapElement{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/BUILD.bazel deleted file mode 100644 index 95a3cbf8c7641..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "factory.go", - "item.go", - "list_element.go", - "map_element.go", - "openapi.go", - "primitive_element.go", - "type_element.go", - "util.go", - "visitor.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse", - importpath = "k8s.io/kubernetes/pkg/kubectl/apply/parse", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/apply:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/factory.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/factory.go deleted file mode 100644 index c9cc8fb2e2ca4..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/factory.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package parse - -import ( - "fmt" - "reflect" - - "k8s.io/kube-openapi/pkg/util/proto" - "k8s.io/kubernetes/pkg/kubectl/apply" - "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" -) - -// Factory creates an Element by combining object values from recorded, local and remote sources with -// the metadata from an openapi schema. -type Factory struct { - // Resources contains the openapi field metadata for the object models - Resources openapi.Resources -} - -// CreateElement returns an Element by collating the recorded, local and remote field values -func (b *Factory) CreateElement(recorded, local, remote map[string]interface{}) (apply.Element, error) { - // Create an Item from the 3 values. Use empty name for field - visitor := &ElementBuildingVisitor{b.Resources} - - gvk, err := getCommonGroupVersionKind(recorded, local, remote) - if err != nil { - return nil, err - } - - // Get the openapi object metadata - s := visitor.resources.LookupResource(gvk) - oapiKind, err := getKind(s) - if err != nil { - return nil, err - } - - data := apply.NewRawElementData(recorded, local, remote) - fieldName := "" - item, err := visitor.getItem(oapiKind, fieldName, data) - if err != nil { - return nil, err - } - - // Collate each field of the item into a combined Element - return item.CreateElement(visitor) -} - -// getItem returns the appropriate Item based on the underlying type of the arguments -func (v *ElementBuildingVisitor) getItem(s proto.Schema, name string, data apply.RawElementData) (Item, error) { - kind, err := getType(data.GetRecorded(), data.GetLocal(), data.GetRemote()) - if err != nil { - return nil, err - } - if kind == nil { - // All of the items values are nil. - return &emptyItem{Name: name}, nil - } - - // Create an item matching the type - switch kind.Kind() { - case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, - reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, - reflect.String: - p, err := getPrimitive(s) - if err != nil { - return nil, fmt.Errorf("expected openapi Primitive, was %T for %v (%v)", s, kind, err) - } - return &primitiveItem{name, p, data}, nil - case reflect.Array, reflect.Slice: - a, err := getArray(s) - if err != nil { - return nil, fmt.Errorf("expected openapi Array, was %T for %v (%v)", s, kind, err) - } - return &listItem{ - Name: name, - Array: a, - ListElementData: apply.ListElementData{ - RawElementData: data, - }, - }, nil - case reflect.Map: - if k, err := getKind(s); err == nil { - return &typeItem{ - Name: name, - Type: k, - MapElementData: apply.MapElementData{ - RawElementData: data, - }, - }, nil - } - // If it looks like a map, and no openapi type is found, default to mapItem - m, err := getMap(s) - if err != nil { - return nil, fmt.Errorf("expected openapi Kind or Map, was %T for %v (%v)", s, kind, err) - } - return &mapItem{ - Name: name, - Map: m, - MapElementData: apply.MapElementData{ - RawElementData: data, - }, - }, nil - } - return nil, fmt.Errorf("unsupported type type %v", kind) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/item.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/item.go deleted file mode 100644 index 15eae075f7d85..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/item.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "k8s.io/kube-openapi/pkg/util/proto" - "k8s.io/kubernetes/pkg/kubectl/apply" -) - -// Item wraps values from 3 sources (recorded, local, remote). -// The values are not collated -type Item interface { - // CreateElement merges the values in the item into a combined Element - CreateElement(ItemVisitor) (apply.Element, error) -} - -// primitiveItem contains a recorded, local, and remote value -type primitiveItem struct { - Name string - Primitive *proto.Primitive - - apply.RawElementData -} - -func (i *primitiveItem) CreateElement(v ItemVisitor) (apply.Element, error) { - return v.CreatePrimitiveElement(i) -} - -func (i *primitiveItem) GetMeta() proto.Schema { - // https://golang.org/doc/faq#nil_error - if i.Primitive != nil { - return i.Primitive - } - return nil -} - -// listItem contains a recorded, local, and remote list -type listItem struct { - Name string - Array *proto.Array - - apply.ListElementData -} - -func (i *listItem) CreateElement(v ItemVisitor) (apply.Element, error) { - return v.CreateListElement(i) -} - -func (i *listItem) GetMeta() proto.Schema { - // https://golang.org/doc/faq#nil_error - if i.Array != nil { - return i.Array - } - return nil -} - -// mapItem contains a recorded, local, and remote map -type mapItem struct { - Name string - Map *proto.Map - - apply.MapElementData -} - -func (i *mapItem) CreateElement(v ItemVisitor) (apply.Element, error) { - return v.CreateMapElement(i) -} - -func (i *mapItem) GetMeta() proto.Schema { - // https://golang.org/doc/faq#nil_error - if i.Map != nil { - return i.Map - } - return nil -} - -// mapItem contains a recorded, local, and remote map -type typeItem struct { - Name string - Type *proto.Kind - - apply.MapElementData -} - -func (i *typeItem) GetMeta() proto.Schema { - // https://golang.org/doc/faq#nil_error - if i.Type != nil { - return i.Type - } - return nil -} - -func (i *typeItem) CreateElement(v ItemVisitor) (apply.Element, error) { - return v.CreateTypeElement(i) -} - -// emptyItem contains no values -type emptyItem struct { - Name string -} - -func (i *emptyItem) CreateElement(v ItemVisitor) (apply.Element, error) { - e := &apply.EmptyElement{} - e.Name = i.Name - return e, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/list_element.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/list_element.go deleted file mode 100644 index 487a5f19ff5c7..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/list_element.go +++ /dev/null @@ -1,199 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package parse - -import ( - "fmt" - - "k8s.io/kube-openapi/pkg/util/proto" - "k8s.io/kubernetes/pkg/kubectl/apply" -) - -// Contains the heavy lifting for finding tuples of matching elements in lists based on the merge key -// and then uses the canonical order derived from the orders in the recorded, local and remote lists. - -// replaceListElement builds a ListElement for a listItem. -// Uses the "merge" strategy to identify "same" elements across lists by a "merge key" -func (v ElementBuildingVisitor) mergeListElement(meta apply.FieldMetaImpl, item *listItem) (*apply.ListElement, error) { - subtype := getSchemaType(item.Array.SubType) - switch subtype { - case "primitive": - return v.doPrimitiveList(meta, item) - case "map", "kind", "reference": - return v.doMapList(meta, item) - default: - return nil, fmt.Errorf("Cannot merge lists with subtype %s", subtype) - } -} - -// doPrimitiveList merges 3 lists of primitives together -// tries to maintain ordering -func (v ElementBuildingVisitor) doPrimitiveList(meta apply.FieldMetaImpl, item *listItem) (*apply.ListElement, error) { - result := &apply.ListElement{ - FieldMetaImpl: apply.FieldMetaImpl{ - MergeType: apply.MergeStrategy, - Name: item.Name, - }, - ListElementData: item.ListElementData, - Values: []apply.Element{}, - } - - // Use locally defined order, then add remote, then add recorded. - orderedKeys := &apply.CombinedPrimitiveSlice{} - - // Locally defined items come first and retain their order - // as defined locally - for _, l := range item.GetLocalList() { - orderedKeys.UpsertLocal(l) - } - // Mixin remote values, adding any that are not present locally - for _, l := range item.GetRemoteList() { - orderedKeys.UpsertRemote(l) - } - // Mixin recorded values, adding any that are not present locally - // or remotely - for _, l := range item.GetRecordedList() { - orderedKeys.UpsertRecorded(l) - } - - for i, l := range orderedKeys.Items { - var s proto.Schema - if item.Array != nil && item.Array.SubType != nil { - s = item.Array.SubType - } - - subitem, err := v.getItem(s, fmt.Sprintf("%d", i), l.RawElementData) - - if err != nil { - return nil, err - } - - // Convert the Item to an Element - newelem, err := subitem.CreateElement(v) - if err != nil { - return nil, err - } - - // Append the element to the list - result.Values = append(result.Values, newelem) - } - - return result, nil -} - -// doMapList merges 3 lists of maps together by collating their values. -// tries to retain ordering -func (v ElementBuildingVisitor) doMapList(meta apply.FieldMetaImpl, item *listItem) (*apply.ListElement, error) { - key := meta.GetFieldMergeKeys() - result := &apply.ListElement{ - FieldMetaImpl: apply.FieldMetaImpl{ - MergeType: apply.MergeStrategy, - MergeKeys: key, - Name: item.Name, - }, - ListElementData: item.ListElementData, - Values: []apply.Element{}, - } - - // Use locally defined order, then add remote, then add recorded. 
- orderedKeys := &apply.CombinedMapSlice{} - - // Locally defined items come first and retain their order - // as defined locally - for _, l := range item.GetLocalList() { - orderedKeys.UpsertLocal(key, l) - } - // Mixin remote values, adding any that are not present locally - for _, l := range item.GetRemoteList() { - orderedKeys.UpsertRemote(key, l) - } - // Mixin recorded values, adding any that are not present locally - // or remotely - for _, l := range item.GetRecordedList() { - orderedKeys.UpsertRecorded(key, l) - } - - for i, l := range orderedKeys.Items { - var s proto.Schema - if item.Array != nil && item.Array.SubType != nil { - s = item.Array.SubType - } - subitem, err := v.getItem(s, fmt.Sprintf("%d", i), l.RawElementData) - if err != nil { - return nil, err - } - - // Build the element fully - newelem, err := subitem.CreateElement(v) - if err != nil { - return nil, err - } - - // Append the element to the list - result.Values = append(result.Values, newelem) - } - - return result, nil -} - -// replaceListElement builds a new ListElement from a listItem -// Uses the "replace" strategy and identify "same" elements across lists by their index -func (v ElementBuildingVisitor) replaceListElement(meta apply.FieldMetaImpl, item *listItem) (*apply.ListElement, error) { - meta.Name = item.Name - result := &apply.ListElement{ - FieldMetaImpl: meta, - ListElementData: item.ListElementData, - Values: []apply.Element{}, - } - - // Use the max length to iterate over the slices - for i := 0; i < max(len(item.GetRecordedList()), len(item.GetLocalList()), len(item.GetRemoteList())); i++ { - - // Lookup the item from each list - data := apply.RawElementData{} - if recorded, recordedSet := boundsSafeLookup(i, item.GetRecordedList()); recordedSet { - data.SetRecorded(recorded) - } - if local, localSet := boundsSafeLookup(i, item.GetLocalList()); localSet { - data.SetLocal(local) - } - if remote, remoteSet := boundsSafeLookup(i, item.GetRemoteList()); remoteSet { - data.SetRemote(remote) - } - - // Create the Item - var s proto.Schema - if item.Array != nil && item.Array.SubType != nil { - s = item.Array.SubType - } - subitem, err := v.getItem(s, fmt.Sprintf("%d", i), data) - if err != nil { - return nil, err - } - - // Build the element - newelem, err := subitem.CreateElement(v) - if err != nil { - return nil, err - } - - // Append the element to the list - result.Values = append(result.Values, newelem) - } - - return result, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/map_element.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/map_element.go deleted file mode 100644 index 0580ab1ba11cd..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/map_element.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
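doPrimitiveList and doMapList above build the merged ordering the same way: local items first, in their local order, then remote-only items, then recorded-only items. A sketch of that ordering using CombinedPrimitiveSlice from the same pre-removal package; the letter values are arbitrary:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubectl/apply"
)

func main() {
	s := &apply.CombinedPrimitiveSlice{}

	// Local values come first and keep their local order...
	for _, v := range []interface{}{"a", "c"} {
		s.UpsertLocal(v)
	}
	// ...remote values are mixed in, adding only what is missing...
	for _, v := range []interface{}{"b", "a"} {
		s.UpsertRemote(v)
	}
	// ...and recorded values fill in anything still absent.
	s.UpsertRecorded("d")

	for _, item := range s.Items {
		fmt.Print(item.Value, " ") // a c b d
	}
	fmt.Println()
}
```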
-*/ - -package parse - -import ( - "k8s.io/kube-openapi/pkg/util/proto" - "k8s.io/kubernetes/pkg/kubectl/apply" -) - -// mapElement builds a new mapElement from a mapItem -func (v ElementBuildingVisitor) mapElement(meta apply.FieldMetaImpl, item *mapItem) (*apply.MapElement, error) { - // Function to return schema type of the map values - var fn schemaFn = func(string) proto.Schema { - // All map values share the same schema - if item.Map != nil && item.Map.SubType != nil { - return item.Map.SubType - } - return nil - } - - // Collect same fields from multiple maps into a map of elements - values, err := v.createMapValues(fn, meta, item.MapElementData) - if err != nil { - return nil, err - } - - // Return the result - return &apply.MapElement{ - FieldMetaImpl: meta, - MapElementData: item.MapElementData, - Values: values, - }, nil -} - -// schemaFn returns the schema for a field or map value based on its name or key -type schemaFn func(key string) proto.Schema - -// createMapValues combines the recorded, local and remote values from -// data into a map of elements. -func (v ElementBuildingVisitor) createMapValues( - schemaFn schemaFn, - meta apply.FieldMetaImpl, - data apply.MapElementData) (map[string]apply.Element, error) { - - // Collate each key in the map - values := map[string]apply.Element{} - for _, key := range keysUnion(data.GetRecordedMap(), data.GetLocalMap(), data.GetRemoteMap()) { - combined := apply.RawElementData{} - if recorded, recordedSet := nilSafeLookup(key, data.GetRecordedMap()); recordedSet { - combined.SetRecorded(recorded) - } - if local, localSet := nilSafeLookup(key, data.GetLocalMap()); localSet { - combined.SetLocal(local) - } - if remote, remoteSet := nilSafeLookup(key, data.GetRemoteMap()); remoteSet { - combined.SetRemote(remote) - } - - // Create an item for the field - field, err := v.getItem(schemaFn(key), key, combined) - if err != nil { - return nil, err - } - - // Build the element for this field - element, err := field.CreateElement(v) - if err != nil { - return nil, err - } - - // Add the field element to the map - values[key] = element - } - return values, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/openapi.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/openapi.go deleted file mode 100644 index a55594de4b18d..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/openapi.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "fmt" - "strings" - - "k8s.io/kube-openapi/pkg/util/proto" -) - -// Contains functions for casting openapi interfaces to their underlying types - -// getSchemaType returns the string type of the schema - e.g. 
array, primitive, map, kind, reference -func getSchemaType(schema proto.Schema) string { - if schema == nil { - return "" - } - visitor := &baseSchemaVisitor{} - schema.Accept(visitor) - return visitor.Kind -} - -// getKind converts schema to an *proto.Kind object -func getKind(schema proto.Schema) (*proto.Kind, error) { - if schema == nil { - return nil, nil - } - visitor := &kindSchemaVisitor{} - schema.Accept(visitor) - return visitor.Result, visitor.Err -} - -// getArray converts schema to an *proto.Array object -func getArray(schema proto.Schema) (*proto.Array, error) { - if schema == nil { - return nil, nil - } - visitor := &arraySchemaVisitor{} - schema.Accept(visitor) - return visitor.Result, visitor.Err -} - -// getMap converts schema to an *proto.Map object -func getMap(schema proto.Schema) (*proto.Map, error) { - if schema == nil { - return nil, nil - } - visitor := &mapSchemaVisitor{} - schema.Accept(visitor) - return visitor.Result, visitor.Err -} - -// getPrimitive converts schema to an *proto.Primitive object -func getPrimitive(schema proto.Schema) (*proto.Primitive, error) { - if schema == nil { - return nil, nil - } - visitor := &primitiveSchemaVisitor{} - schema.Accept(visitor) - return visitor.Result, visitor.Err -} - -type baseSchemaVisitor struct { - Err error - Kind string -} - -// VisitArray implements openapi -func (v *baseSchemaVisitor) VisitArray(array *proto.Array) { - v.Kind = "array" - v.Err = fmt.Errorf("Array type not expected") -} - -// MergeMap implements openapi -func (v *baseSchemaVisitor) VisitMap(*proto.Map) { - v.Kind = "map" - v.Err = fmt.Errorf("Map type not expected") -} - -// MergePrimitive implements openapi -func (v *baseSchemaVisitor) VisitPrimitive(*proto.Primitive) { - v.Kind = "primitive" - v.Err = fmt.Errorf("Primitive type not expected") -} - -// VisitKind implements openapi -func (v *baseSchemaVisitor) VisitKind(*proto.Kind) { - v.Kind = "kind" - v.Err = fmt.Errorf("Kind type not expected") -} - -// VisitReference implements openapi -func (v *baseSchemaVisitor) VisitReference(reference proto.Reference) { - v.Kind = "reference" - v.Err = fmt.Errorf("Reference type not expected") -} - -type kindSchemaVisitor struct { - baseSchemaVisitor - Result *proto.Kind -} - -// VisitKind implements openapi -func (v *kindSchemaVisitor) VisitKind(result *proto.Kind) { - v.Result = result - v.Kind = "kind" -} - -// VisitReference implements openapi -func (v *kindSchemaVisitor) VisitReference(reference proto.Reference) { - reference.SubSchema().Accept(v) - if v.Err == nil { - v.Err = copyExtensions(reference.GetPath().String(), reference.GetExtensions(), v.Result.Extensions) - } -} - -func copyExtensions(field string, from, to map[string]interface{}) error { - // Copy extensions from field to type for references - for key, val := range from { - if curr, found := to[key]; found { - // Don't allow the same extension to be defined both on the field and on the type - return fmt.Errorf("Cannot override value for extension %s on field %s from %v to %v", - key, field, curr, val) - } - to[key] = val - } - return nil -} - -type mapSchemaVisitor struct { - baseSchemaVisitor - Result *proto.Map -} - -// MergeMap implements openapi -func (v *mapSchemaVisitor) VisitMap(result *proto.Map) { - v.Result = result - v.Kind = "map" -} - -// VisitReference implements openapi -func (v *mapSchemaVisitor) VisitReference(reference proto.Reference) { - reference.SubSchema().Accept(v) - if v.Err == nil { - v.Err = copyExtensions(reference.GetPath().String(), 
reference.GetExtensions(), v.Result.Extensions) - } -} - -type arraySchemaVisitor struct { - baseSchemaVisitor - Result *proto.Array -} - -// VisitArray implements openapi -func (v *arraySchemaVisitor) VisitArray(result *proto.Array) { - v.Result = result - v.Kind = "array" - v.Err = copySubElementPatchStrategy(result.Path.String(), result.GetExtensions(), result.SubType.GetExtensions()) -} - -// copySubElementPatchStrategy copies the strategies for subelements to the subtype -// e.g. PodTemplate.Volumes is a []Volume with "x-kubernetes-patch-strategy": "merge,retainKeys" -// the "retainKeys" strategy applies to merging Volumes, and must be copied to the sub element -func copySubElementPatchStrategy(field string, from, to map[string]interface{}) error { - // Check if the parent has a patch strategy extension - if ext, found := from["x-kubernetes-patch-strategy"]; found { - strategy, ok := ext.(string) - if !ok { - return fmt.Errorf("Expected string value for x-kubernetes-patch-strategy on %s, was %T", - field, ext) - } - // Check if the parent patch strategy has a sub patch strategy, and if so copy to the sub type - if strings.Contains(strategy, ",") { - strategies := strings.Split(strategy, ",") - if len(strategies) != 2 { - // Only 1 sub strategy is supported - return fmt.Errorf( - "Expected between 0 and 2 elements for x-kubernetes-patch-merge-strategy but got %v", - strategies) - } - to["x-kubernetes-patch-strategy"] = strategies[1] - } - } - return nil -} - -// VisitReference implements openapi -func (v *arraySchemaVisitor) VisitReference(reference proto.Reference) { - reference.SubSchema().Accept(v) - if v.Err == nil { - v.Err = copyExtensions(reference.GetPath().String(), reference.GetExtensions(), v.Result.Extensions) - } -} - -type primitiveSchemaVisitor struct { - baseSchemaVisitor - Result *proto.Primitive -} - -// VisitPrimitive implements openapi -func (v *primitiveSchemaVisitor) VisitPrimitive(result *proto.Primitive) { - v.Result = result - v.Kind = "primitive" -} - -// VisitReference implements openapi -func (v *primitiveSchemaVisitor) VisitReference(reference proto.Reference) { - reference.SubSchema().Accept(v) - if v.Err == nil { - v.Err = copyExtensions(reference.GetPath().String(), reference.GetExtensions(), v.Result.Extensions) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/primitive_element.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/primitive_element.go deleted file mode 100644 index de393e86a5065..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/primitive_element.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package parse - -import "k8s.io/kubernetes/pkg/kubectl/apply" - -// primitiveElement builds a new primitiveElement from a PrimitiveItem -func (v ElementBuildingVisitor) primitiveElement(item *primitiveItem) (*apply.PrimitiveElement, error) { - meta := apply.FieldMetaImpl{Name: item.Name} - return &apply.PrimitiveElement{ - FieldMetaImpl: meta, - RawElementData: item.RawElementData, - }, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/type_element.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/type_element.go deleted file mode 100644 index e2fa1d8b68244..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/type_element.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "k8s.io/kube-openapi/pkg/util/proto" - "k8s.io/kubernetes/pkg/kubectl/apply" -) - -// typeElement builds a new mapElement from a typeItem -func (v ElementBuildingVisitor) typeElement(meta apply.FieldMetaImpl, item *typeItem) (*apply.TypeElement, error) { - // Function to get the schema of a field from its key - var fn schemaFn = func(key string) proto.Schema { - if item.Type != nil && item.Type.Fields != nil { - return item.Type.Fields[key] - } - return nil - } - - // Collect same fields from multiple maps into a map of elements - values, err := v.createMapValues(fn, meta, item.MapElementData) - if err != nil { - return nil, err - } - - // Return the result - return &apply.TypeElement{ - FieldMetaImpl: meta, - MapElementData: item.MapElementData, - Values: values, - }, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/util.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/util.go deleted file mode 100644 index 676ab7d3d0f80..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/util.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "fmt" - "reflect" - "strings" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kube-openapi/pkg/util/proto" - "k8s.io/kubernetes/pkg/kubectl/apply" -) - -// nilSafeLookup returns the value from the map if the map is non-nil -func nilSafeLookup(key string, from map[string]interface{}) (interface{}, bool) { - if from != nil { - value, found := from[key] - return value, found - } - // Not present - return nil, false -} - -// boundsSafeLookup returns the value from the slice if the slice is non-nil and -// the index is in bounds. 
-func boundsSafeLookup(index int, from []interface{}) (interface{}, bool) { - if from != nil && len(from) > index { - return from[index], true - } - return nil, false -} - -// keysUnion returns a slice containing the union of the keys present in the arguments -func keysUnion(maps ...map[string]interface{}) []string { - keys := map[string]interface{}{} - for _, m := range maps { - for k := range m { - keys[k] = nil - } - } - result := []string{} - for key := range keys { - result = append(result, key) - } - return result -} - -// max returns the argument with the highest value -func max(values ...int) int { - v := 0 - for _, i := range values { - if i > v { - v = i - } - } - return v -} - -// getType returns the type of the arguments. If the arguments don't have matching -// types, getType returns an error. Nil types match everything. -func getType(args ...interface{}) (reflect.Type, error) { - var last interface{} - for _, next := range args { - // Skip nil values - if next == nil { - continue - } - - // Set the first non-nil value we find and continue - if last == nil { - last = next - continue - } - - // Verify the types of the values match - if reflect.TypeOf(last).Kind() != reflect.TypeOf(next).Kind() { - return nil, fmt.Errorf("mismatching non-nil types for the same field: %T %T", last, next) - } - } - - return reflect.TypeOf(last), nil -} - -// getFieldMeta parses the metadata about the field from the openapi spec -func getFieldMeta(s proto.Schema, name string) (apply.FieldMetaImpl, error) { - m := apply.FieldMetaImpl{} - if s != nil { - ext := s.GetExtensions() - if e, found := ext["x-kubernetes-patch-strategy"]; found { - strategy, ok := e.(string) - if !ok { - return apply.FieldMetaImpl{}, fmt.Errorf("Expected string for x-kubernetes-patch-strategy but got %T", e) - } - - // Take the first strategy if there are substrategies. 
- // Sub strategies are copied to sub types in openapi.go - strategies := strings.Split(strategy, ",") - if len(strategies) > 2 { - return apply.FieldMetaImpl{}, fmt.Errorf("Expected between 0 and 2 elements for x-kubernetes-patch-merge-strategy by got %v", strategies) - } - // For lists, choose the strategy for this type, not the subtype - m.MergeType = strategies[0] - } - if k, found := ext["x-kubernetes-patch-merge-key"]; found { - key, ok := k.(string) - if !ok { - return apply.FieldMetaImpl{}, fmt.Errorf("Expected string for x-kubernetes-patch-merge-key by got %T", k) - } - m.MergeKeys = apply.MergeKeys(strings.Split(key, ",")) - } - } - m.Name = name - return m, nil -} - -// getCommonGroupVersionKind verifies that the recorded, local and remote all share -// the same GroupVersionKind and returns the value -func getCommonGroupVersionKind(recorded, local, remote map[string]interface{}) (schema.GroupVersionKind, error) { - recordedGVK, err := getGroupVersionKind(recorded) - if err != nil { - return schema.GroupVersionKind{}, err - } - localGVK, err := getGroupVersionKind(local) - if err != nil { - return schema.GroupVersionKind{}, err - } - remoteGVK, err := getGroupVersionKind(remote) - if err != nil { - return schema.GroupVersionKind{}, err - } - - if !reflect.DeepEqual(recordedGVK, localGVK) || !reflect.DeepEqual(localGVK, remoteGVK) { - return schema.GroupVersionKind{}, - fmt.Errorf("group version kinds do not match (recorded: %v local: %v remote: %v)", - recordedGVK, localGVK, remoteGVK) - } - return recordedGVK, nil -} - -// getGroupVersionKind returns the GroupVersionKind of the object -func getGroupVersionKind(config map[string]interface{}) (schema.GroupVersionKind, error) { - gvk := schema.GroupVersionKind{} - if gv, found := config["apiVersion"]; found { - casted, ok := gv.(string) - if !ok { - return gvk, fmt.Errorf("Expected string for apiVersion, found %T", gv) - } - s := strings.Split(casted, "/") - if len(s) != 1 { - gvk.Group = s[0] - } - gvk.Version = s[len(s)-1] - } else { - return gvk, fmt.Errorf("Missing apiVersion in Kind %v", config) - } - if k, found := config["kind"]; found { - casted, ok := k.(string) - if !ok { - return gvk, fmt.Errorf("Expected string for kind, found %T", k) - } - gvk.Kind = casted - } else { - return gvk, fmt.Errorf("Missing kind in Kind %v", config) - } - return gvk, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/visitor.go deleted file mode 100644 index d67ba6a79ba89..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse/visitor.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "k8s.io/kubernetes/pkg/kubectl/apply" - "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" -) - -// ItemVisitor provides an interface for Items to Accept and call -// the Visit function that corresponds to its actual type. 
-type ItemVisitor interface { - // CreatePrimitiveElement builds an Element for a primitiveItem - CreatePrimitiveElement(*primitiveItem) (apply.Element, error) - - // CreateListElement builds an Element for a listItem - CreateListElement(*listItem) (apply.Element, error) - - // CreateMapElement builds an Element for a mapItem - CreateMapElement(*mapItem) (apply.Element, error) - - // CreateTypeElement builds an Element for a typeItem - CreateTypeElement(*typeItem) (apply.Element, error) -} - -// ElementBuildingVisitor creates an Elements from Items -// An Element combines the values from the Item with the field metadata. -type ElementBuildingVisitor struct { - resources openapi.Resources -} - -// CreatePrimitiveElement creates a primitiveElement -func (v ElementBuildingVisitor) CreatePrimitiveElement(item *primitiveItem) (apply.Element, error) { - return v.primitiveElement(item) -} - -// CreateListElement creates a ListElement -func (v ElementBuildingVisitor) CreateListElement(item *listItem) (apply.Element, error) { - meta, err := getFieldMeta(item.GetMeta(), item.Name) - if err != nil { - return nil, err - } - if meta.GetFieldMergeType() == apply.MergeStrategy { - return v.mergeListElement(meta, item) - } - return v.replaceListElement(meta, item) -} - -// CreateMapElement creates a mapElement -func (v ElementBuildingVisitor) CreateMapElement(item *mapItem) (apply.Element, error) { - meta, err := getFieldMeta(item.GetMeta(), item.Name) - if err != nil { - return nil, err - } - return v.mapElement(meta, item) -} - -// CreateTypeElement creates a typeElement -func (v ElementBuildingVisitor) CreateTypeElement(item *typeItem) (apply.Element, error) { - meta, err := getFieldMeta(item.GetMeta(), item.Name) - if err != nil { - return nil, err - } - return v.typeElement(meta, item) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/primitive_element.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/primitive_element.go deleted file mode 100644 index cc80c7b03265a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/primitive_element.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apply - -import "reflect" - -// PrimitiveElement contains the recorded, local and remote values for a field -// of type primitive -type PrimitiveElement struct { - // FieldMetaImpl contains metadata about the field from openapi - FieldMetaImpl - - // RawElementData contains the values the field was set to - RawElementData -} - -// Merge implements Element.Merge -func (e PrimitiveElement) Merge(v Strategy) (Result, error) { - return v.MergePrimitive(e) -} - -var _ Element = &PrimitiveElement{} - -// HasConflict returns ConflictError if primitive element has conflict field. -// Conflicts happen when either of the following conditions: -// 1. A field is specified in both recorded and remote values, but does not match. -// 2. A field is specified in recorded values, but missing in remote values. 
-func (e PrimitiveElement) HasConflict() error { - if e.HasRecorded() && e.HasRemote() { - if !reflect.DeepEqual(e.GetRecorded(), e.GetRemote()) { - return NewConflictError(e) - } - } - if e.HasRecorded() && !e.HasRemote() { - return NewConflictError(e) - } - return nil -} - -var _ ConflictDetector = &PrimitiveElement{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/BUILD.bazel deleted file mode 100644 index 3298fe923cec8..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "merge.go", - "merge_visitor.go", - "replace_visitor.go", - "retain_keys_visitor.go", - "strategic_visitor.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy", - importpath = "k8s.io/kubernetes/pkg/kubectl/apply/strategy", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/kubernetes/pkg/kubectl/apply:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/doc.go deleted file mode 100644 index 419ca4dcedfca..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategy diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/merge.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/merge.go deleted file mode 100644 index 393ffc0a25090..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/merge.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategy - -import "k8s.io/kubernetes/pkg/kubectl/apply" - -// Options controls how a merge will be executed -type Options struct { - // FailOnConflict when true will fail patch creation if the recorded and remote - // have 2 fields set for the same value that cannot be merged. - // e.g. 
primitive values, list values with replace strategy, and map values with do - // strategy - FailOnConflict bool -} - -// Create returns a new apply.Visitor for merging multiple objects together -func Create(options Options) apply.Strategy { - return createDelegatingStrategy(options) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/merge_visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/merge_visitor.go deleted file mode 100644 index 5a1b1a003ca62..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/merge_visitor.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategy - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/kubectl/apply" -) - -func createMergeStrategy(options Options, strategic *delegatingStrategy) mergeStrategy { - return mergeStrategy{ - strategic, - options, - } -} - -// mergeStrategy merges the values in an Element into a single Result -type mergeStrategy struct { - strategic *delegatingStrategy - options Options -} - -// MergeList merges the lists in a ListElement into a single Result -func (v mergeStrategy) MergeList(e apply.ListElement) (apply.Result, error) { - // No merge logic if adding or deleting a field - if result, done := v.doAddOrDelete(e); done { - return result, nil - } - // Detect conflict in ListElement - if err := v.doConflictDetect(e); err != nil { - return apply.Result{}, err - } - // Merge each item in the list and append it to the list - merged := []interface{}{} - for _, value := range e.Values { - // Recursively merge the list element before adding the value to the list - m, err := value.Merge(v.strategic) - if err != nil { - return apply.Result{}, err - } - - switch m.Operation { - case apply.SET: - // Keep the list item value - merged = append(merged, m.MergedResult) - case apply.DROP: - // Drop the list item value - default: - panic(fmt.Errorf("Unexpected result operation type %+v", m)) - } - } - - if len(merged) == 0 { - // If the list is empty, return a nil entry - return apply.Result{Operation: apply.SET, MergedResult: nil}, nil - } - // Return the merged list, and tell the caller to keep it - return apply.Result{Operation: apply.SET, MergedResult: merged}, nil -} - -// MergeMap merges the maps in a MapElement into a single Result -func (v mergeStrategy) MergeMap(e apply.MapElement) (apply.Result, error) { - // No merge logic if adding or deleting a field - if result, done := v.doAddOrDelete(e); done { - return result, nil - } - // Detect conflict in MapElement - if err := v.doConflictDetect(e); err != nil { - return apply.Result{}, err - } - return v.doMergeMap(e.GetValues()) -} - -// MergeMap merges the type instances in a TypeElement into a single Result -func (v mergeStrategy) MergeType(e apply.TypeElement) (apply.Result, error) { - // No merge logic if adding or deleting a field - if result, done := v.doAddOrDelete(e); done { - return result, nil - } - // Detect conflict in TypeElement - if err := v.doConflictDetect(e); 
err != nil { - return apply.Result{}, err - } - return v.doMergeMap(e.GetValues()) -} - -// do merges a recorded, local and remote map into a new object -func (v mergeStrategy) doMergeMap(e map[string]apply.Element) (apply.Result, error) { - - // Merge each item in the list - merged := map[string]interface{}{} - for key, value := range e { - // Recursively merge the map element before adding the value to the map - result, err := value.Merge(v.strategic) - if err != nil { - return apply.Result{}, err - } - - switch result.Operation { - case apply.SET: - // Keep the map item value - merged[key] = result.MergedResult - case apply.DROP: - // Drop the map item value - default: - panic(fmt.Errorf("Unexpected result operation type %+v", result)) - } - } - - // Return the merged map, and tell the caller to keep it - if len(merged) == 0 { - // Special case the empty map to set the field value to nil, but keep the field key - // This is how the tests expect the structures to look when parsed from yaml - return apply.Result{Operation: apply.SET, MergedResult: nil}, nil - } - return apply.Result{Operation: apply.SET, MergedResult: merged}, nil -} - -func (v mergeStrategy) doAddOrDelete(e apply.Element) (apply.Result, bool) { - if apply.IsAdd(e) { - return apply.Result{Operation: apply.SET, MergedResult: e.GetLocal()}, true - } - - // Delete the List - if apply.IsDrop(e) { - return apply.Result{Operation: apply.DROP}, true - } - - return apply.Result{}, false -} - -// MergePrimitive returns and error. Primitive elements can't be merged, only replaced. -func (v mergeStrategy) MergePrimitive(diff apply.PrimitiveElement) (apply.Result, error) { - return apply.Result{}, fmt.Errorf("Cannot merge primitive element %v", diff.Name) -} - -// MergeEmpty returns an empty result -func (v mergeStrategy) MergeEmpty(diff apply.EmptyElement) (apply.Result, error) { - return apply.Result{Operation: apply.SET}, nil -} - -// doConflictDetect returns error if element has conflict -func (v mergeStrategy) doConflictDetect(e apply.Element) error { - return v.strategic.doConflictDetect(e) -} - -var _ apply.Strategy = &mergeStrategy{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/replace_visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/replace_visitor.go deleted file mode 100644 index 047d53ff1bbb3..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/replace_visitor.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package strategy - -import ( - "k8s.io/kubernetes/pkg/kubectl/apply" -) - -// replaceVisitor creates a patch to replace a remote field value with a local field value -type replaceStrategy struct { - strategic *delegatingStrategy - options Options -} - -func createReplaceStrategy(options Options, strategic *delegatingStrategy) replaceStrategy { - return replaceStrategy{ - strategic, - options, - } -} - -// MergeList returns a result by merging the recorded, local and remote values -// - replacing the remote value with the local value -func (v replaceStrategy) MergeList(e apply.ListElement) (apply.Result, error) { - return v.doReplace(e) -} - -// MergeMap returns a result by merging the recorded, local and remote values -// - replacing the remote value with the local value -func (v replaceStrategy) MergeMap(e apply.MapElement) (apply.Result, error) { - return v.doReplace(e) -} - -// MergeType returns a result by merging the recorded, local and remote values -// - replacing the remote value with the local value -func (v replaceStrategy) MergeType(e apply.TypeElement) (apply.Result, error) { - return v.doReplace(e) -} - -// MergePrimitive returns a result by merging the recorded, local and remote values -// - replacing the remote value with the local value -func (v replaceStrategy) MergePrimitive(e apply.PrimitiveElement) (apply.Result, error) { - return v.doReplace(e) -} - -// MergeEmpty -func (v replaceStrategy) MergeEmpty(e apply.EmptyElement) (apply.Result, error) { - return apply.Result{Operation: apply.SET}, nil -} - -// replace returns the local value if specified, otherwise it returns the remote value -// this works regardless of the approach -func (v replaceStrategy) doReplace(e apply.Element) (apply.Result, error) { - - if result, done := v.doAddOrDelete(e); done { - return result, nil - } - if err := v.doConflictDetect(e); err != nil { - return apply.Result{}, err - } - if e.HasLocal() { - // Specified locally, set the local value - return apply.Result{Operation: apply.SET, MergedResult: e.GetLocal()}, nil - } else if e.HasRemote() { - // Not specified locally, set the remote value - return apply.Result{Operation: apply.SET, MergedResult: e.GetRemote()}, nil - } else { - // Only specified in the recorded, drop the field. - return apply.Result{Operation: apply.DROP, MergedResult: e.GetRemote()}, nil - } -} - -// doAddOrDelete will check if the field should be either added or deleted. If either is true, it will -// true the operation and true. Otherwise it will return false. -func (v replaceStrategy) doAddOrDelete(e apply.Element) (apply.Result, bool) { - if apply.IsAdd(e) { - return apply.Result{Operation: apply.SET, MergedResult: e.GetLocal()}, true - } - - // Delete the List - if apply.IsDrop(e) { - return apply.Result{Operation: apply.DROP}, true - } - - return apply.Result{}, false -} - -// doConflictDetect returns error if element has conflict -func (v replaceStrategy) doConflictDetect(e apply.Element) error { - return v.strategic.doConflictDetect(e) -} - -var _ apply.Strategy = &replaceStrategy{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/retain_keys_visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/retain_keys_visitor.go deleted file mode 100644 index b901285be1e49..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/retain_keys_visitor.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategy - -import ( - "fmt" - "k8s.io/kubernetes/pkg/kubectl/apply" -) - -func createRetainKeysStrategy(options Options, strategic *delegatingStrategy) retainKeysStrategy { - return retainKeysStrategy{ - &mergeStrategy{strategic, options}, - strategic, - options, - } -} - -// retainKeysStrategy merges the values in an Element into a single Result, -// dropping any fields omitted from the local copy. (but merging values when -// defined locally and remotely) -type retainKeysStrategy struct { - merge *mergeStrategy - strategic *delegatingStrategy - options Options -} - -// MergeMap merges the type instances in a TypeElement into a single Result -// keeping only the fields defined locally, but merging their values with -// the remote values. -func (v retainKeysStrategy) MergeType(e apply.TypeElement) (apply.Result, error) { - // No merge logic if adding or deleting a field - if result, done := v.merge.doAddOrDelete(&e); done { - return result, nil - } - - elem := map[string]apply.Element{} - for key := range e.GetLocalMap() { - elem[key] = e.GetValues()[key] - } - return v.merge.doMergeMap(elem) -} - -// MergeMap returns an error. Only TypeElements can have retainKeys. -func (v retainKeysStrategy) MergeMap(e apply.MapElement) (apply.Result, error) { - return apply.Result{}, fmt.Errorf("Cannot use retainkeys with map element %v", e.Name) -} - -// MergeList returns an error. Only TypeElements can have retainKeys. -func (v retainKeysStrategy) MergeList(e apply.ListElement) (apply.Result, error) { - return apply.Result{}, fmt.Errorf("Cannot use retainkeys with list element %v", e.Name) -} - -// MergePrimitive returns an error. Only TypeElements can have retainKeys. -func (v retainKeysStrategy) MergePrimitive(diff apply.PrimitiveElement) (apply.Result, error) { - return apply.Result{}, fmt.Errorf("Cannot use retainkeys with primitive element %v", diff.Name) -} - -// MergeEmpty returns an empty result -func (v retainKeysStrategy) MergeEmpty(diff apply.EmptyElement) (apply.Result, error) { - return v.merge.MergeEmpty(diff) -} - -var _ apply.Strategy = &retainKeysStrategy{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/strategic_visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/strategic_visitor.go deleted file mode 100644 index 8c2d592b05b0a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/strategic_visitor.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategy - -import ( - "k8s.io/kubernetes/pkg/kubectl/apply" -) - -// delegatingStrategy delegates merging fields to other visitor implementations -// based on the merge strategy preferred by the field. -type delegatingStrategy struct { - options Options - merge mergeStrategy - replace replaceStrategy - retainKeys retainKeysStrategy -} - -// createDelegatingStrategy returns a new delegatingStrategy -func createDelegatingStrategy(options Options) *delegatingStrategy { - v := &delegatingStrategy{ - options: options, - } - v.replace = createReplaceStrategy(options, v) - v.merge = createMergeStrategy(options, v) - v.retainKeys = createRetainKeysStrategy(options, v) - return v -} - -// MergeList delegates visiting a list based on the field patch strategy. -// Defaults to "replace" -func (v delegatingStrategy) MergeList(diff apply.ListElement) (apply.Result, error) { - switch diff.GetFieldMergeType() { - case apply.MergeStrategy: - return v.merge.MergeList(diff) - case apply.ReplaceStrategy: - return v.replace.MergeList(diff) - case apply.RetainKeysStrategy: - return v.retainKeys.MergeList(diff) - default: - return v.replace.MergeList(diff) - } -} - -// MergeMap delegates visiting a map based on the field patch strategy. -// Defaults to "merge" -func (v delegatingStrategy) MergeMap(diff apply.MapElement) (apply.Result, error) { - switch diff.GetFieldMergeType() { - case apply.MergeStrategy: - return v.merge.MergeMap(diff) - case apply.ReplaceStrategy: - return v.replace.MergeMap(diff) - case apply.RetainKeysStrategy: - return v.retainKeys.MergeMap(diff) - default: - return v.merge.MergeMap(diff) - } -} - -// MergeType delegates visiting a map based on the field patch strategy. -// Defaults to "merge" -func (v delegatingStrategy) MergeType(diff apply.TypeElement) (apply.Result, error) { - switch diff.GetFieldMergeType() { - case apply.MergeStrategy: - return v.merge.MergeType(diff) - case apply.ReplaceStrategy: - return v.replace.MergeType(diff) - case apply.RetainKeysStrategy: - return v.retainKeys.MergeType(diff) - default: - return v.merge.MergeType(diff) - } -} - -// MergePrimitive delegates visiting a primitive to the ReplaceVisitorSingleton. -func (v delegatingStrategy) MergePrimitive(diff apply.PrimitiveElement) (apply.Result, error) { - // Always replace primitives - return v.replace.MergePrimitive(diff) -} - -// MergeEmpty -func (v delegatingStrategy) MergeEmpty(diff apply.EmptyElement) (apply.Result, error) { - return v.merge.MergeEmpty(diff) -} - -// doConflictDetect detects conflicts in element when option enabled, return error if conflict happened. 
-func (v delegatingStrategy) doConflictDetect(e apply.Element) error { - if v.options.FailOnConflict { - if e, ok := e.(apply.ConflictDetector); ok { - return e.HasConflict() - } - } - return nil -} - -var _ apply.Strategy = &delegatingStrategy{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/test_swagger.json b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/test_swagger.json deleted file mode 100644 index 57f69314d60ba..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy/test_swagger.json +++ /dev/null @@ -1,250 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "Kubernetes", - "version": "v1.9.0" - }, - "paths": { - }, - "definitions": { - "io.k8s.api.core.v1.Container": { - "description": "A single application container that you want to run within a pod.", - "required": [ - "name", - "image" - ], - "properties": { - "image": { - "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", - "type": "string" - }, - "name": { - "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - "type": "string" - }, - "ports": { - "description": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - "type": "array", - "items": { - "$ref": "#/definitions/io.k8s.api.core.v1.ContainerPort" - }, - "x-kubernetes-patch-merge-key": "containerPort,protocol", - "x-kubernetes-patch-strategy": "merge" - } - } - }, - "io.k8s.api.core.v1.ContainerPort": { - "description": "ContainerPort represents a network port in a single container.", - "required": [ - "containerPort" - ], - "properties": { - "containerPort": { - "description": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 \u003c x \u003c 65536.", - "type": "integer", - "format": "int32" - }, - "hostIP": { - "description": "What host IP to bind the external port to.", - "type": "string" - }, - "hostPort": { - "description": "Number of port to expose on the host. If specified, this must be a valid port number, 0 \u003c x \u003c 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.", - "type": "integer", - "format": "int32" - }, - "name": { - "description": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", - "type": "string" - }, - "protocol": { - "description": "Protocol for port. Must be UDP or TCP. Defaults to \"TCP\".", - "type": "string" - } - } - }, - "io.k8s.api.apps.v1beta1.Deployment": { - "description": "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "description": "Standard object metadata.", - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" - }, - "spec": { - "description": "Specification of the desired behavior of the Deployment.", - "$ref": "#/definitions/io.k8s.api.apps.v1beta1.DeploymentSpec" - } - }, - "x-kubernetes-group-version-kind": [ - { - "group": "apps", - "kind": "Deployment", - "version": "v1beta1" - } - ] - }, - "io.k8s.api.apps.v1beta1.DeploymentSpec": { - "description": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "required": [ - "template" - ], - "properties": { - "minReadySeconds": { - "description": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "type": "integer", - "format": "int32" - }, - "paused": { - "description": "Indicates that the deployment is paused.", - "type": "boolean" - }, - "progressDeadlineSeconds": { - "description": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", - "type": "integer", - "format": "int32" - }, - "replicas": { - "description": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "type": "integer", - "format": "int32" - }, - "revisionHistoryLimit": { - "description": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", - "type": "integer", - "format": "int32" - }, - "template": { - "description": "Template describes the pods that will be created.", - "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec" - } - } - }, - "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": { - "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - "properties": { - "annotations": { - "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "labels": { - "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "name": { - "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "type": "string" - }, - "namespace": { - "description": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - "type": "string" - } - } - }, - "io.k8s.api.core.v1.PodTemplateSpec": { - "description": "PodTemplateSpec describes the data a pod should have when created from a template", - "properties": { - "metadata": { - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" - }, - "spec": { - "description": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "$ref": "#/definitions/io.k8s.api.core.v1.PodSpec" - } - } - }, - "io.k8s.api.core.v1.PodSpec": { - "description": "PodSpec is a description of a pod.", - "required": [ - "containers" - ], - "properties": { - "containers": { - "description": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", - "type": "array", - "items": { - "$ref": "#/definitions/io.k8s.api.core.v1.Container" - }, - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge" - } - } - }, - "io.k8s.api.extensions.v1beta1.ReplicaSet": { - "description": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet represents the configuration of a ReplicaSet.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "description": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" - }, - "spec": { - "description": "Spec defines the specification of the desired behavior of the ReplicaSet. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ReplicaSetSpec", - "x-kubernetes-patch-strategy": "replace" - - } - }, - "x-kubernetes-group-version-kind": [ - { - "group": "extensions", - "kind": "ReplicaSet", - "version": "v1beta1" - } - ] - }, - "io.k8s.api.extensions.v1beta1.ReplicaSetSpec": { - "description": "ReplicaSetSpec is the specification of a ReplicaSet.", - "properties": { - "minReadySeconds": { - "description": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "type": "integer", - "format": "int32" - }, - "replicas": { - "description": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", - "type": "integer", - "format": "int32" - }, - "template": { - "description": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", - "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec" - } - } - } - }, - "securityDefinitions": { - "BearerToken": { - "description": "Bearer Token authentication", - "type": "apiKey", - "name": "authorization", - "in": "header" - } - }, - "security": [ - { - "BearerToken": [] - } - ] - } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/type_element.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/type_element.go deleted file mode 100644 index 2528db87d1d21..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/type_element.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apply - -// TypeElement contains the recorded, local and remote values for a field -// that is a complex type -type TypeElement struct { - // FieldMetaImpl contains metadata about the field from openapi - FieldMetaImpl - - MapElementData - - // Values contains the combined recorded-local-remote value of each field in the type - // Values contains the values in mapElement. Element must contain - // a Name matching its key in Values - Values map[string]Element -} - -// Merge implements Element.Merge -func (e TypeElement) Merge(v Strategy) (Result, error) { - return v.MergeType(e) -} - -// GetValues implements Element.GetValues -func (e TypeElement) GetValues() map[string]Element { - return e.Values -} - -// HasConflict returns ConflictError if some elements in type conflict. 
-func (e TypeElement) HasConflict() error { - for _, item := range e.GetValues() { - if item, ok := item.(ConflictDetector); ok { - if err := item.HasConflict(); err != nil { - return err - } - } - } - return nil -} - -var _ Element = &TypeElement{} -var _ ConflictDetector = &TypeElement{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply/visitor.go deleted file mode 100644 index d63d973cd77c6..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/visitor.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apply - -// Strategy implements a strategy for merging recorded, local and remote values contained -// in an element and returns the merged result. -// Follows the visitor pattern -type Strategy interface { - // MergeList is invoked by ListElements when Merge is called - MergeList(ListElement) (Result, error) - - // MergeMap is invoked by MapElements when Merge is called - MergeMap(MapElement) (Result, error) - - // MergeType is invoked by TypeElements when Merge is called - MergeType(TypeElement) (Result, error) - - // MergePrimitive is invoked by PrimitiveElements when Merge is called - MergePrimitive(PrimitiveElement) (Result, error) - - // MergeEmpty is invoked by EmptyElements when Merge is called - MergeEmpty(EmptyElement) (Result, error) -} - -// Operation records whether a field should be set or dropped -type Operation int - -const ( - // ERROR is an error during merge - ERROR Operation = iota - // SET sets the field on an object - SET - // DROP drops the field from an object - DROP -) - -// Result is the result of merging fields -type Result struct { - // Operation is the operation that should be performed for the merged field - Operation Operation - // MergedResult is the new merged value - MergedResult interface{} -} - -const ( - // MergeStrategy is the strategy to merge the local and remote values - MergeStrategy = "merge" - - // RetainKeysStrategy is the strategy to merge the local and remote values, but drop any fields not defined locally - RetainKeysStrategy = "retainKeys" - - // ReplaceStrategy is the strategy to replace the remote value with the local value - ReplaceStrategy = "replace" -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD.bazel index 4d2298d414b40..cce7b6ba8461f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD.bazel @@ -4,144 +4,59 @@ go_library( name = "go_default_library", srcs = [ "alpha.go", - "annotate.go", - "apiresources.go", - "apiversions.go", - "apply.go", - "apply_edit_last_applied.go", - "apply_set_last_applied.go", - "apply_view_last_applied.go", - "attach.go", - "autoscale.go", - "certificates.go", - "clusterinfo.go", - "clusterinfo_dump.go", "cmd.go", - "completion.go", - "convert.go", - "cp.go", - "delete.go", - "delete_flags.go", - "describe.go", - "diff.go", - 
"drain.go", - "edit.go", - "exec.go", - "explain.go", - "expose.go", - "help.go", - "label.go", - "logs.go", - "options.go", - "patch.go", - "plugin.go", - "portforward.go", - "proxy.go", - "replace.go", - "rollingupdate.go", - "run.go", - "scale.go", - "taint.go", - "top.go", - "top_node.go", - "top_pod.go", - "version.go", + "profiling.go", ], importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd", importpath = "k8s.io/kubernetes/pkg/kubectl/cmd", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/daviddengcn/go-colortext:go_default_library", - "//vendor/github.com/docker/distribution/reference:go_default_library", - "//vendor/github.com/docker/docker/pkg/term:go_default_library", - "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/jonboulle/clockwork:go_default_library", - "//vendor/github.com/renstrom/dedent:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", - "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/dynamic:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - 
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", - "//vendor/k8s.io/client-go/tools/portforward:go_default_library", - "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", - "//vendor/k8s.io/client-go/tools/watch:go_default_library", - "//vendor/k8s.io/client-go/transport/spdy:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/apply/parse:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/apply/strategy:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace:go_default_library", + 
"//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/wait:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/explain:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/proxy:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/term:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/validation:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/printers:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/util/interrupt:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/util/taints:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/version:go_default_library", - "//vendor/k8s.io/metrics/pkg/apis/metrics:go_default_library", - "//vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library", - "//vendor/k8s.io/metrics/pkg/client/clientset/versioned:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/alpha.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/alpha.go index 835dd51c77a7f..9fa8fde545823 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/alpha.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/alpha.go @@ -20,9 +20,9 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // NewCmdAlpha creates a command that acts as an alternate root command for features in alpha @@ -36,7 +36,6 @@ func NewCmdAlpha(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra. // Alpha commands should be added here. As features graduate from alpha they should move // from here to the CommandGroups defined by NewKubeletCommand() in cmd.go. //cmd.AddCommand(NewCmdDebug(f, in, out, err)) - cmd.AddCommand(NewCmdDiff(f, streams)) // NewKubeletCommand() will hide the alpha command if it has no subcommands. Overriding // the help function ensures a reasonable message if someone types the hidden command anyway. 
diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate/BUILD.bazel new file mode 100644 index 0000000000000..0b68e4a652e16 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["annotate.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/annotate", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/evanphx/json-patch:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate/annotate.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate/annotate.go index 9c6d96a393ecd..56fe5603fb47b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/annotate/annotate.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package annotate import ( "bytes" @@ -22,8 +22,8 @@ import ( "io" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,10 +35,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // AnnotateOptions have the data required to perform the annotate operation @@ -123,11 +123,11 @@ func NewCmdAnnotate(parent string, f cmdutil.Factory, ioStreams genericclioption o := NewAnnotateOptions(ioStreams) cmd := &cobra.Command{ - Use: "annotate [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--resource-version=version]", + Use: "annotate [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... 
KEY_N=VAL_N [--resource-version=version]", DisableFlagsInUseLine: true, - Short: i18n.T("Update the annotations on a resource"), - Long: annotateLong + "\n\n" + cmdutil.SuggestApiResources(parent), - Example: annotateExample, + Short: i18n.T("Update the annotations on a resource"), + Long: annotateLong + "\n\n" + cmdutil.SuggestApiResources(parent), + Example: annotateExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -245,7 +245,7 @@ func (o AnnotateOptions) RunAnnotate() error { // only apply resource version locking on a single resource. // we must perform this check after o.builder.Do() as - // []o.resources can not not accurately return the proper number + // []o.resources can not accurately return the proper number // of resources when they are not passed in "resource/name" format. if !singleItemImpliedResource && len(o.resourceVersion) > 0 { return fmt.Errorf("--resource-version may only be used with a single resource") @@ -271,7 +271,7 @@ func (o AnnotateOptions) RunAnnotate() error { return err } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } if err := o.updateAnnotations(obj); err != nil { return err @@ -283,7 +283,7 @@ func (o AnnotateOptions) RunAnnotate() error { patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) createdPatch := err == nil if err != nil { - glog.V(2).Infof("couldn't compute patch: %v", err) + klog.V(2).Infof("couldn't compute patch: %v", err) } mapping := info.ResourceMapping() diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources/BUILD.bazel new file mode 100644 index 0000000000000..5a30d03351ce7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "apiresources.go", + "apiversions.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/apiresources", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/printers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources/apiresources.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources/apiresources.go index 7d5124a784d1d..2e3d90bf2394f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources/apiresources.go @@ -14,7 +14,7 @@ See 
the License for the specific language governing permissions and limitations under the License. */ -package cmd +package apiresources import ( "fmt" @@ -28,9 +28,9 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/printers" + "k8s.io/kubernetes/pkg/kubectl/util/printers" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -77,7 +77,8 @@ func NewAPIResourceOptions(ioStreams genericclioptions.IOStreams) *ApiResourcesO } } -func NewCmdApiResources(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { +// NewCmdAPIResources creates the `api-resources` command +func NewCmdAPIResources(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { o := NewAPIResourceOptions(ioStreams) cmd := &cobra.Command{ @@ -86,7 +87,8 @@ func NewCmdApiResources(f cmdutil.Factory, ioStreams genericclioptions.IOStreams Long: "Print the supported API resources on the server", Example: apiresourcesExample, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(o.Validate(cmd)) + cmdutil.CheckErr(o.Complete(cmd, args)) + cmdutil.CheckErr(o.Validate()) cmdutil.CheckErr(o.RunApiResources(cmd, f)) }, } @@ -101,7 +103,7 @@ func NewCmdApiResources(f cmdutil.Factory, ioStreams genericclioptions.IOStreams return cmd } -func (o *ApiResourcesOptions) Validate(cmd *cobra.Command) error { +func (o *ApiResourcesOptions) Validate() error { supportedOutputTypes := sets.NewString("", "wide", "name") if !supportedOutputTypes.Has(o.Output) { return fmt.Errorf("--output %v is not available", o.Output) @@ -109,6 +111,13 @@ func (o *ApiResourcesOptions) Validate(cmd *cobra.Command) error { return nil } +func (o *ApiResourcesOptions) Complete(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return cmdutil.UsageErrorf(cmd, "unexpected arguments: %v", args) + } + return nil +} + func (o *ApiResourcesOptions) RunApiResources(cmd *cobra.Command, f cmdutil.Factory) error { w := printers.GetNewTabWriter(o.Out) defer w.Flush() diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiversions.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources/apiversions.go similarity index 83% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiversions.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources/apiversions.go index fafd7e2932e5e..738aa51b6b40f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiversions.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apiresources/apiversions.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package apiresources import ( "fmt" @@ -25,9 +25,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/discovery" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -48,7 +48,8 @@ func NewApiVersionsOptions(ioStreams genericclioptions.IOStreams) *ApiVersionsOp } } -func NewCmdApiVersions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { +// NewCmdAPIVersions creates the `api-versions` command +func NewCmdAPIVersions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { o := NewApiVersionsOptions(ioStreams) cmd := &cobra.Command{ Use: "api-versions", @@ -56,14 +57,17 @@ func NewCmdApiVersions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) Long: "Print the supported API versions on the server, in the form of \"group/version\"", Example: apiversionsExample, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(o.Complete(f)) + cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.RunApiVersions()) }, } return cmd } -func (o *ApiVersionsOptions) Complete(f cmdutil.Factory) error { +func (o *ApiVersionsOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return cmdutil.UsageErrorf(cmd, "unexpected arguments: %v", args) + } var err error o.discoveryClient, err = f.ToDiscoveryClient() if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/BUILD.bazel new file mode 100644 index 0000000000000..bc8c62db127de --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/BUILD.bazel @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "apply.go", + "apply_edit_last_applied.go", + "apply_set_last_applied.go", + "apply_view_last_applied.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/apply", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/jonboulle/clockwork:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + 
"//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/validation:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply.go similarity index 79% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply.go index b4d588c089706..07f8b9c7594ee 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply.go @@ -14,19 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package apply import ( + "encoding/json" "fmt" "io" "strings" "time" - "github.com/golang/glog" "github.com/jonboulle/clockwork" "github.com/spf13/cobra" - - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,15 +41,17 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" + "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" + "k8s.io/klog" oapi "k8s.io/kube-openapi/pkg/util/proto" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/kubernetes/pkg/kubectl/cmd/delete" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/kubernetes/pkg/kubectl/validation" ) @@ -61,8 +62,8 @@ type ApplyOptions struct { PrintFlags *genericclioptions.PrintFlags ToPrinter func(string) (printers.ResourcePrinter, error) - DeleteFlags *DeleteFlags - DeleteOptions *DeleteOptions + DeleteFlags *delete.DeleteFlags + DeleteOptions *delete.DeleteOptions Selector string DryRun bool @@ -76,11 +77,12 @@ type ApplyOptions struct { PruneWhitelist []string ShouldIncludeUninitialized bool - Validator validation.Schema - Builder *resource.Builder - Mapper meta.RESTMapper - DynamicClient dynamic.Interface - OpenAPISchema openapi.Resources + Validator validation.Schema + Builder *resource.Builder + Mapper meta.RESTMapper + DynamicClient dynamic.Interface + DiscoveryClient discovery.DiscoveryInterface + OpenAPISchema openapi.Resources Namespace string EnforceNamespace bool @@ -127,7 +129,7 @@ var ( func NewApplyOptions(ioStreams genericclioptions.IOStreams) *ApplyOptions { return &ApplyOptions{ RecordFlags: 
genericclioptions.NewRecordFlags(), - DeleteFlags: NewDeleteFlags("that contains the configuration to apply"), + DeleteFlags: delete.NewDeleteFlags("that contains the configuration to apply"), PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), Overwrite: true, @@ -147,11 +149,11 @@ func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericclioptions o.cmdBaseName = baseName cmd := &cobra.Command{ - Use: "apply -f FILENAME", + Use: "apply -f FILENAME", DisableFlagsInUseLine: true, - Short: i18n.T("Apply a configuration to a resource by filename or stdin"), - Long: applyLong, - Example: applyExample, + Short: i18n.T("Apply a configuration to a resource by filename or stdin"), + Long: applyLong, + Example: applyExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd)) cmdutil.CheckErr(validateArgs(cmd, args)) @@ -211,6 +213,11 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return err } + o.DiscoveryClient, err = f.ToDiscoveryClient() + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() if err != nil { return err @@ -293,6 +300,11 @@ func (o *ApplyOptions) Run() error { openapiSchema = o.OpenAPISchema } + dryRunVerifier := &DryRunVerifier{ + Finder: cmdutil.NewCRDFinder(cmdutil.CRDFromDynamic(o.DynamicClient)), + OpenAPIGetter: o.DiscoveryClient, + } + // include the uninitialized objects by default if --prune is true // unless explicitly set --include-uninitialized=false r := o.Builder. @@ -331,12 +343,19 @@ func (o *ApplyOptions) Run() error { return err } + // If server-dry-run is requested but the type doesn't support it, fail right away. + if o.ServerDryRun { + if err := dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + return err + } + } + if info.Namespaced() { visitedNamespaces.Insert(info.Namespace) } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } // Get the modified configuration of the object. 
Embed the result @@ -354,6 +373,7 @@ func (o *ApplyOptions) Run() error { if !errors.IsNotFound(err) { return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) } + // Create the resource if it doesn't exist // First, update the annotation used by kubectl apply if err := kubectl.CreateApplyAnnotation(info.Object, unstructured.UnstructuredJSONScheme); err != nil { @@ -371,13 +391,14 @@ func (o *ApplyOptions) Run() error { return cmdutil.AddSourceToErr("creating", info.Source, err) } info.Refresh(obj, true) - metadata, err := meta.Accessor(info.Object) - if err != nil { - return err - } - visitedUids.Insert(string(metadata.GetUID())) } + metadata, err := meta.Accessor(info.Object) + if err != nil { + return err + } + visitedUids.Insert(string(metadata.GetUID())) + count++ if printObject { @@ -392,41 +413,41 @@ func (o *ApplyOptions) Run() error { return printer.PrintObj(info.Object, o.Out) } - if !o.DryRun { - metadata, err := meta.Accessor(info.Object) - if err != nil { - return err - } + metadata, err := meta.Accessor(info.Object) + if err != nil { + return err + } + visitedUids.Insert(string(metadata.GetUID())) + if !o.DryRun { annotationMap := metadata.GetAnnotations() - if _, ok := annotationMap[api.LastAppliedConfigAnnotation]; !ok { + if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok { fmt.Fprintf(o.ErrOut, warningNoLastAppliedConfigAnnotation, o.cmdBaseName) } helper := resource.NewHelper(info.Client, info.Mapping) - patcher := &patcher{ - mapping: info.Mapping, - helper: helper, - dynamicClient: o.DynamicClient, - overwrite: o.Overwrite, - backOff: clockwork.NewRealClock(), - force: o.DeleteOptions.ForceDeletion, - cascade: o.DeleteOptions.Cascade, - timeout: o.DeleteOptions.Timeout, - gracePeriod: o.DeleteOptions.GracePeriod, - serverDryRun: o.ServerDryRun, - openapiSchema: openapiSchema, + patcher := &Patcher{ + Mapping: info.Mapping, + Helper: helper, + DynamicClient: o.DynamicClient, + Overwrite: o.Overwrite, + BackOff: clockwork.NewRealClock(), + Force: o.DeleteOptions.ForceDeletion, + Cascade: o.DeleteOptions.Cascade, + Timeout: o.DeleteOptions.Timeout, + GracePeriod: o.DeleteOptions.GracePeriod, + ServerDryRun: o.ServerDryRun, + OpenapiSchema: openapiSchema, + Retries: maxPatchRetry, } - patchBytes, patchedObject, err := patcher.patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut) + patchBytes, patchedObject, err := patcher.Patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut) if err != nil { return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err) } info.Refresh(patchedObject, true) - visitedUids.Insert(string(metadata.GetUID())) - if string(patchBytes) == "{}" && !printObject { count++ @@ -467,7 +488,7 @@ func (o *ApplyOptions) Run() error { objToPrint := objs[0] if len(objs) > 1 { - list := &v1.List{ + list := &corev1.List{ TypeMeta: metav1.TypeMeta{ Kind: "List", APIVersion: "v1", @@ -619,7 +640,7 @@ func (p *pruner) prune(namespace string, mapping *meta.RESTMapping, includeUnini return err } annots := metadata.GetAnnotations() - if _, ok := annots[api.LastAppliedConfigAnnotation]; !ok { + if _, ok := annots[corev1.LastAppliedConfigAnnotation]; !ok { // don't prune resources not created with apply continue } @@ -663,28 +684,89 @@ func runDelete(namespace, name string, mapping *meta.RESTMapping, c dynamic.Inte return 
c.Resource(mapping.Resource).Namespace(namespace).Delete(name, options) } -func (p *patcher) delete(namespace, name string) error { - return runDelete(namespace, name, p.mapping, p.dynamicClient, p.cascade, p.gracePeriod, p.serverDryRun) +func (p *Patcher) delete(namespace, name string) error { + return runDelete(namespace, name, p.Mapping, p.DynamicClient, p.Cascade, p.GracePeriod, p.ServerDryRun) } -type patcher struct { - mapping *meta.RESTMapping - helper *resource.Helper - dynamicClient dynamic.Interface +type Patcher struct { + Mapping *meta.RESTMapping + Helper *resource.Helper + DynamicClient dynamic.Interface - overwrite bool - backOff clockwork.Clock + Overwrite bool + BackOff clockwork.Clock - force bool - cascade bool - timeout time.Duration - gracePeriod int - serverDryRun bool + Force bool + Cascade bool + Timeout time.Duration + GracePeriod int + ServerDryRun bool + + // If set, forces the patch against a specific resourceVersion + ResourceVersion *string - openapiSchema openapi.Resources + // Number of retries to make if the patch fails with conflict + Retries int + + OpenapiSchema openapi.Resources } -func (p *patcher) patchSimple(obj runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { +// DryRunVerifier verifies if a given group-version-kind supports DryRun +// against the current server. Sending dryRun requests to apiserver that +// don't support it will result in objects being unwillingly persisted. +// +// It reads the OpenAPI to see if the given GVK supports dryRun. If the +// GVK can not be found, we assume that CRDs will have the same level of +// support as "namespaces", and non-CRDs will not be supported. We +// delay the check for CRDs as much as possible though, since it +// requires an extra round-trip to the server. +type DryRunVerifier struct { + Finder cmdutil.CRDFinder + OpenAPIGetter discovery.OpenAPISchemaInterface +} + +// HasSupport verifies if the given gvk supports DryRun. An error is +// returned if it doesn't. +func (v *DryRunVerifier) HasSupport(gvk schema.GroupVersionKind) error { + oapi, err := v.OpenAPIGetter.OpenAPISchema() + if err != nil { + return fmt.Errorf("failed to download openapi: %v", err) + } + supports, err := openapi.SupportsDryRun(oapi, gvk) + if err != nil { + // We assume that we couldn't find the type, then check for namespace: + supports, _ = openapi.SupportsDryRun(oapi, schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}) + // If namespace supports dryRun, then we will support dryRun for CRDs only. + if supports { + supports, err = v.Finder.HasCRD(gvk.GroupKind()) + if err != nil { + return fmt.Errorf("failed to check CRD: %v", err) + } + } + } + if !supports { + return fmt.Errorf("%v doesn't support dry-run", gvk) + } + return nil +} + +func addResourceVersion(patch []byte, rv string) ([]byte, error) { + var patchMap map[string]interface{} + err := json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, err + } + u := unstructured.Unstructured{Object: patchMap} + a, err := meta.Accessor(&u) + if err != nil { + return nil, err + } + a.SetResourceVersion(rv) + + return json.Marshal(patchMap) +} + +func (p *Patcher) patchSimple(obj runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { // Serialize the current configuration of the object from the server. 
current, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { @@ -705,7 +787,7 @@ func (p *patcher) patchSimple(obj runtime.Object, modified []byte, source, names // Create the versioned struct from the type defined in the restmapping // (which is the API version we'll be submitting the patch to) - versionedObject, err := scheme.Scheme.New(p.mapping.GroupVersionKind) + versionedObject, err := scheme.Scheme.New(p.Mapping.GroupVersionKind) switch { case runtime.IsNotRegisteredError(err): // fall back to generic JSON merge patch @@ -720,17 +802,17 @@ func (p *patcher) patchSimple(obj runtime.Object, modified []byte, source, names return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf(createPatchErrFormat, original, modified, current), source, err) } case err != nil: - return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf("getting instance of versioned object for %v:", p.mapping.GroupVersionKind), source, err) + return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf("getting instance of versioned object for %v:", p.Mapping.GroupVersionKind), source, err) case err == nil: // Compute a three way strategic merge patch to send to server. patchType = types.StrategicMergePatchType // Try to use openapi first if the openapi spec is available and can successfully calculate the patch. // Otherwise, fall back to baked-in types. - if p.openapiSchema != nil { - if schema = p.openapiSchema.LookupResource(p.mapping.GroupVersionKind); schema != nil { + if p.OpenapiSchema != nil { + if schema = p.OpenapiSchema.LookupResource(p.Mapping.GroupVersionKind); schema != nil { lookupPatchMeta = strategicpatch.PatchMetaFromOpenAPI{Schema: schema} - if openapiPatch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.overwrite); err != nil { + if openapiPatch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite); err != nil { fmt.Fprintf(errOut, "warning: error calculating patch from openapi spec: %v\n", err) } else { patchType = types.StrategicMergePatchType @@ -744,7 +826,7 @@ func (p *patcher) patchSimple(obj runtime.Object, modified []byte, source, names if err != nil { return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf(createPatchErrFormat, original, modified, current), source, err) } - patch, err = strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.overwrite) + patch, err = strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite) if err != nil { return nil, nil, cmdutil.AddSourceToErr(fmt.Sprintf(createPatchErrFormat, original, modified, current), source, err) } @@ -755,41 +837,51 @@ func (p *patcher) patchSimple(obj runtime.Object, modified []byte, source, names return patch, obj, nil } + if p.ResourceVersion != nil { + patch, err = addResourceVersion(patch, *p.ResourceVersion) + if err != nil { + return nil, nil, cmdutil.AddSourceToErr("Failed to insert resourceVersion in patch", source, err) + } + } + options := metav1.UpdateOptions{} - if p.serverDryRun { + if p.ServerDryRun { options.DryRun = []string{metav1.DryRunAll} } - patchedObj, err := p.helper.Patch(namespace, name, patchType, patch, &options) + patchedObj, err := p.Helper.Patch(namespace, name, patchType, patch, &options) return patch, patchedObj, err } -func (p *patcher) patch(current runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { +func (p *Patcher) Patch(current runtime.Object, modified 
[]byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { var getErr error patchBytes, patchObject, err := p.patchSimple(current, modified, source, namespace, name, errOut) - for i := 1; i <= maxPatchRetry && errors.IsConflict(err); i++ { + if p.Retries == 0 { + p.Retries = maxPatchRetry + } + for i := 1; i <= p.Retries && errors.IsConflict(err); i++ { if i > triesBeforeBackOff { - p.backOff.Sleep(backOffPeriod) + p.BackOff.Sleep(backOffPeriod) } - current, getErr = p.helper.Get(namespace, name, false) + current, getErr = p.Helper.Get(namespace, name, false) if getErr != nil { return nil, nil, getErr } patchBytes, patchObject, err = p.patchSimple(current, modified, source, namespace, name, errOut) } - if err != nil && (errors.IsConflict(err) || errors.IsInvalid(err)) && p.force { + if err != nil && (errors.IsConflict(err) || errors.IsInvalid(err)) && p.Force { patchBytes, patchObject, err = p.deleteAndCreate(current, modified, namespace, name) } return patchBytes, patchObject, err } -func (p *patcher) deleteAndCreate(original runtime.Object, modified []byte, namespace, name string) ([]byte, runtime.Object, error) { +func (p *Patcher) deleteAndCreate(original runtime.Object, modified []byte, namespace, name string) ([]byte, runtime.Object, error) { if err := p.delete(namespace, name); err != nil { return modified, nil, err } // TODO: use wait - if err := wait.PollImmediate(1*time.Second, p.timeout, func() (bool, error) { - if _, err := p.helper.Get(namespace, name, false); !errors.IsNotFound(err) { + if err := wait.PollImmediate(1*time.Second, p.Timeout, func() (bool, error) { + if _, err := p.Helper.Get(namespace, name, false); !errors.IsNotFound(err) { return false, err } return true, nil @@ -801,14 +893,14 @@ func (p *patcher) deleteAndCreate(original runtime.Object, modified []byte, name return modified, nil, err } options := metav1.CreateOptions{} - if p.serverDryRun { + if p.ServerDryRun { options.DryRun = []string{metav1.DryRunAll} } - createdObject, err := p.helper.Create(namespace, true, versionedObject, &options) + createdObject, err := p.Helper.Create(namespace, true, versionedObject, &options) if err != nil { // restore the original object if we fail to create the new one // but still propagate and advertise error to user - recreated, recreateErr := p.helper.Create(namespace, true, original, &options) + recreated, recreateErr := p.Helper.Create(namespace, true, original, &options) if recreateErr != nil { err = fmt.Errorf("An error occurred force-replacing the existing object with the newly provided one:\n\n%v.\n\nAdditionally, an error occurred attempting to restore the original object:\n\n%v\n", err, recreateErr) } else { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_edit_last_applied.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply_edit_last_applied.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_edit_last_applied.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply_edit_last_applied.go index 1312049910fce..0290a55e04218 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_edit_last_applied.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply_edit_last_applied.go @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package apply import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -59,11 +59,11 @@ func NewCmdApplyEditLastApplied(f cmdutil.Factory, ioStreams genericclioptions.I o := editor.NewEditOptions(editor.ApplyEditMode, ioStreams) cmd := &cobra.Command{ - Use: "edit-last-applied (RESOURCE/NAME | -f FILENAME)", + Use: "edit-last-applied (RESOURCE/NAME | -f FILENAME)", DisableFlagsInUseLine: true, - Short: "Edit latest last-applied-configuration annotations of a resource/object", - Long: applyEditLastAppliedLong, - Example: applyEditLastAppliedExample, + Short: "Edit latest last-applied-configuration annotations of a resource/object", + Long: applyEditLastAppliedLong, + Example: applyEditLastAppliedExample, Run: func(cmd *cobra.Command, args []string) { if err := o.Complete(f, args, cmd); err != nil { cmdutil.CheckErr(err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_set_last_applied.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply_set_last_applied.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_set_last_applied.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply_set_last_applied.go index 899e7311fbb4f..23699f407fce5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_set_last_applied.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply_set_last_applied.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package apply import ( "bytes" @@ -30,11 +30,11 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) type SetLastAppliedOptions struct { @@ -91,11 +91,11 @@ func NewSetLastAppliedOptions(ioStreams genericclioptions.IOStreams) *SetLastApp func NewCmdApplySetLastApplied(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { o := NewSetLastAppliedOptions(ioStreams) cmd := &cobra.Command{ - Use: "set-last-applied -f FILENAME", + Use: "set-last-applied -f FILENAME", DisableFlagsInUseLine: true, - Short: i18n.T("Set the last-applied-configuration annotation on a live object to match the contents of a file."), - Long: applySetLastAppliedLong, - Example: applySetLastAppliedExample, + Short: i18n.T("Set the last-applied-configuration annotation on a live object to match the contents of a file."), + Long: applySetLastAppliedLong, + Example: applySetLastAppliedExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd)) cmdutil.CheckErr(o.Validate()) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_view_last_applied.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply_view_last_applied.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_view_last_applied.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply_view_last_applied.go index c960ee5e0af76..e35b0c434cb01 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_view_last_applied.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply_view_last_applied.go @@ -14,21 +14,21 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package apply import ( "bytes" "encoding/json" "fmt" - "github.com/ghodss/yaml" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" + "sigs.k8s.io/yaml" ) type ViewLastAppliedOptions struct { @@ -69,11 +69,11 @@ func NewCmdApplyViewLastApplied(f cmdutil.Factory, ioStreams genericclioptions.I options := NewViewLastAppliedOptions(ioStreams) cmd := &cobra.Command{ - Use: "view-last-applied (TYPE [NAME | -l label] | TYPE/NAME | -f FILENAME)", + Use: "view-last-applied (TYPE [NAME | -l label] | TYPE/NAME | -f FILENAME)", DisableFlagsInUseLine: true, - Short: i18n.T("View latest last-applied-configuration annotations of a resource/object"), - Long: applyViewLastAppliedLong, - Example: applyViewLastAppliedExample, + Short: i18n.T("View latest last-applied-configuration annotations of a resource/object"), + Long: applyViewLastAppliedLong, + Example: applyViewLastAppliedExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(cmd, f, args)) cmdutil.CheckErr(options.Validate(cmd)) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach/BUILD.bazel new file mode 100644 index 0000000000000..764b915bb9732 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["attach.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/attach", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach/attach.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach/attach.go index 9ecaf4726dcde..c0bfd7831636f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach.go +++ 
b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach/attach.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package attach import ( "fmt" @@ -22,8 +22,8 @@ import ( "net/url" "time" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -31,12 +31,12 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/resource" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/kubernetes/pkg/kubectl/cmd/exec" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -63,7 +63,7 @@ const ( // AttachOptions declare the arguments accepted by the Exec command type AttachOptions struct { - StreamOptions + exec.StreamOptions // whether to disable use of standard error when streaming output from tty DisableStderr bool @@ -86,22 +86,22 @@ type AttachOptions struct { func NewAttachOptions(streams genericclioptions.IOStreams) *AttachOptions { return &AttachOptions{ - StreamOptions: StreamOptions{ + StreamOptions: exec.StreamOptions{ IOStreams: streams, }, Attach: &DefaultRemoteAttach{}, - AttachFunc: defaultAttachFunc, + AttachFunc: DefaultAttachFunc, } } func NewCmdAttach(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewAttachOptions(streams) cmd := &cobra.Command{ - Use: "attach (POD | TYPE/NAME) -c CONTAINER", + Use: "attach (POD | TYPE/NAME) -c CONTAINER", DisableFlagsInUseLine: true, - Short: i18n.T("Attach to a running container"), - Long: "Attach to a process that is already running inside an existing container.", - Example: attachExample, + Short: i18n.T("Attach to a running container"), + Long: "Attach to a process that is already running inside an existing container.", + Example: attachExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -120,7 +120,7 @@ type RemoteAttach interface { Attach(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error } -func defaultAttachFunc(o *AttachOptions, containerToAttach *corev1.Container, raw bool, sizeQueue remotecommand.TerminalSizeQueue) func() error { +func DefaultAttachFunc(o *AttachOptions, containerToAttach *corev1.Container, raw bool, sizeQueue remotecommand.TerminalSizeQueue) func() error { return func() error { restClient, err := restclient.RESTClientFor(o.Config) if err != nil { @@ -137,7 +137,7 @@ func defaultAttachFunc(o *AttachOptions, containerToAttach *corev1.Container, ra Stdout: o.Out != nil, Stderr: !o.DisableStderr, TTY: raw, - }, legacyscheme.ParameterCodec) + }, scheme.ParameterCodec) return o.Attach.Attach("POST", req.URL(), o.Config, o.In, o.Out, o.ErrOut, raw, sizeQueue) } @@ -263,7 +263,7 @@ func (o *AttachOptions) Run() error { } // ensure we can recover the terminal while attached - t := o.setupTTY() + t := o.SetupTTY() var sizeQueue remotecommand.TerminalSizeQueue if t.Raw { @@ -326,7 +326,7 @@ func (o *AttachOptions) containerToAttachTo(pod *corev1.Pod) (*corev1.Container, fmt.Fprintf(o.ErrOut, "%s\n", o.SuggestedCmdUsage) } - 
glog.V(4).Infof("defaulting container name to %s", pod.Spec.Containers[0].Name) + klog.V(4).Infof("defaulting container name to %s", pod.Spec.Containers[0].Name) return &pod.Spec.Containers[0], nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/BUILD.bazel index 73298bf761f42..b2c06db3b0caf 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/BUILD.bazel @@ -11,7 +11,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/auth", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/api/authorization/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", @@ -25,9 +24,10 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", "//vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/OWNERS new file mode 100644 index 0000000000000..cd0d70a0f8f02 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-authorizers-approvers +reviewers: +- sig-auth-authorizers-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/cani.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/cani.go index 7762fa7c25457..31ce080dd1ac8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/cani.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/cani.go @@ -30,8 +30,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/cli-runtime/pkg/genericclioptions" authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // CanIOptions is the start of the data required to perform the operation. 
As new fields are added, add them here instead of @@ -86,11 +86,11 @@ func NewCmdCanI(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C } cmd := &cobra.Command{ - Use: "can-i VERB [TYPE | TYPE/NAME | NONRESOURCEURL]", + Use: "can-i VERB [TYPE | TYPE/NAME | NONRESOURCEURL]", DisableFlagsInUseLine: true, - Short: "Check whether an action is allowed", - Long: canILong, - Example: canIExample, + Short: "Check whether an action is allowed", + Long: canILong, + Example: canIExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, args)) cmdutil.CheckErr(o.Validate()) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/reconcile.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/reconcile.go index 6d042af7e1fea..e2dafca743d93 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/reconcile.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/auth/reconcile.go @@ -20,8 +20,8 @@ import ( "errors" "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -31,9 +31,9 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/resource" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/kubernetes/pkg/registry/rbac/reconciliation" ) @@ -78,11 +78,11 @@ func NewCmdReconcile(f cmdutil.Factory, streams genericclioptions.IOStreams) *co o := NewReconcileOptions(streams) cmd := &cobra.Command{ - Use: "reconcile -f FILENAME", + Use: "reconcile -f FILENAME", DisableFlagsInUseLine: true, - Short: "Reconciles rules for RBAC Role, RoleBinding, ClusterRole, and ClusterRole binding objects", - Long: reconcileLong, - Example: reconcileExample, + Short: "Reconciles rules for RBAC Role, RoleBinding, ClusterRole, and ClusterRole binding objects", + Long: reconcileLong, + Example: reconcileExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(cmd, f, args)) cmdutil.CheckErr(o.Validate()) @@ -182,7 +182,7 @@ func (o *ReconcileOptions) RunReconcile() error { reconcileOptions := reconciliation.ReconcileRoleOptions{ Confirm: !o.DryRun, RemoveExtraPermissions: o.RemoveExtraPermissions, - Role: reconciliation.RoleRuleOwner{Role: t}, + Role: reconciliation.RoleRuleOwner{Role: t}, Client: reconciliation.RoleModifier{ NamespaceClient: o.NamespaceClient.Namespaces(), Client: o.RBACClient, @@ -198,7 +198,7 @@ func (o *ReconcileOptions) RunReconcile() error { reconcileOptions := reconciliation.ReconcileRoleOptions{ Confirm: !o.DryRun, RemoveExtraPermissions: o.RemoveExtraPermissions, - Role: reconciliation.ClusterRoleRuleOwner{ClusterRole: t}, + Role: reconciliation.ClusterRoleRuleOwner{ClusterRole: t}, Client: reconciliation.ClusterRoleModifier{ Client: o.RBACClient.ClusterRoles(), }, @@ -251,7 +251,7 @@ func (o *ReconcileOptions) RunReconcile() error { return fmt.Errorf("only rbac.authorization.k8s.io/v1 is supported: not %T", t) default: - glog.V(1).Infof("skipping %#v", info.Object.GetObjectKind()) + klog.V(1).Infof("skipping %#v", info.Object.GetObjectKind()) // skip ignored resources } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale/BUILD.bazel new file mode 100644 index 0000000000000..1afc2889dda68 --- /dev/null +++ 
b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["autoscale.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/autoscale", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale/autoscale.go similarity index 84% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale/autoscale.go index da5a00d6ccb40..9f59e7bbd8269 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale/autoscale.go @@ -14,13 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package autoscale import ( "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" autoscalingv1 "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -28,20 +28,21 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" autoscalingv1client "k8s.io/client-go/kubernetes/typed/autoscaling/v1" - "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( autoscaleLong = templates.LongDesc(i18n.T(` Creates an autoscaler that automatically chooses and sets the number of pods that run in a kubernetes cluster. - Looks up a Deployment, ReplicaSet, or ReplicationController by name and creates an autoscaler that uses the given resource as a reference. + Looks up a Deployment, ReplicaSet, StatefulSet, or ReplicationController by name and creates an autoscaler that uses the given resource as a reference. 
An autoscaler can automatically increase or decrease number of pods deployed within the system as needed.`)) autoscaleExample = templates.Examples(i18n.T(` @@ -74,7 +75,7 @@ type AutoscaleOptions struct { dryRun bool builder *resource.Builder canBeAutoscaled polymorphichelpers.CanBeAutoscaledFunc - generatorFunc func(string, *meta.RESTMapping) (kubectl.StructuredGenerator, error) + generatorFunc func(string, *meta.RESTMapping) (generate.StructuredGenerator, error) HPAClient autoscalingv1client.HorizontalPodAutoscalersGetter @@ -98,11 +99,11 @@ func NewCmdAutoscale(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) * validArgs := []string{"deployment", "replicaset", "replicationcontroller"} cmd := &cobra.Command{ - Use: "autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MAXPODS [--cpu-percent=CPU]", + Use: "autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MAXPODS [--cpu-percent=CPU]", DisableFlagsInUseLine: true, - Short: i18n.T("Auto-scale a Deployment, ReplicaSet, or ReplicationController"), - Long: autoscaleLong, - Example: autoscaleExample, + Short: i18n.T("Auto-scale a Deployment, ReplicaSet, or ReplicationController"), + Long: autoscaleLong, + Example: autoscaleExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -115,7 +116,7 @@ func NewCmdAutoscale(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) * o.RecordFlags.AddFlags(cmd) o.PrintFlags.AddFlags(cmd) - cmd.Flags().StringVar(&o.Generator, "generator", cmdutil.HorizontalPodAutoscalerV1GeneratorName, i18n.T("The name of the API generator to use. Currently there is only 1 generator.")) + cmd.Flags().StringVar(&o.Generator, "generator", generateversioned.HorizontalPodAutoscalerV1GeneratorName, i18n.T("The name of the API generator to use. Currently there is only 1 generator.")) cmd.Flags().Int32Var(&o.Min, "min", -1, "The lower limit for the number of pods that can be set by the autoscaler. If it's not specified or negative, the server will apply a default value.") cmd.Flags().Int32Var(&o.Max, "max", -1, "The upper limit for the number of pods that can be set by the autoscaler. Required.") cmd.MarkFlagRequired("max") @@ -148,17 +149,17 @@ func (o *AutoscaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args o.HPAClient = kubeClient.AutoscalingV1() // get the generator - o.generatorFunc = func(name string, mapping *meta.RESTMapping) (kubectl.StructuredGenerator, error) { + o.generatorFunc = func(name string, mapping *meta.RESTMapping) (generate.StructuredGenerator, error) { switch o.Generator { - case cmdutil.HorizontalPodAutoscalerV1GeneratorName: - return &kubectl.HorizontalPodAutoscalerGeneratorV1{ + case generateversioned.HorizontalPodAutoscalerV1GeneratorName: + return &generateversioned.HorizontalPodAutoscalerGeneratorV1{ Name: name, MinReplicas: o.Min, MaxReplicas: o.Max, CPUPercent: o.CpuPercent, ScaleRefName: name, ScaleRefKind: mapping.GroupVersionKind.Kind, - ScaleRefApiVersion: mapping.GroupVersionKind.GroupVersion().String(), + ScaleRefAPIVersion: mapping.GroupVersionKind.GroupVersion().String(), }, nil default: return nil, cmdutil.UsageErrorf(cmd, "Generator %s not supported. ", o.Generator) @@ -195,7 +196,7 @@ func (o *AutoscaleOptions) Validate() error { func (o *AutoscaleOptions) Run() error { r := o.builder. - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). ContinueOnError(). 
NamespaceParam(o.namespace).DefaultNamespace(). FilenameParam(o.enforceNamespace, o.FilenameOptions). @@ -233,7 +234,7 @@ func (o *AutoscaleOptions) Run() error { } if err := o.Recorder.Record(hpa); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } if o.dryRun { @@ -246,7 +247,7 @@ func (o *AutoscaleOptions) Run() error { return printer.PrintObj(hpa, o.Out) } - if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, hpa, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, hpa, scheme.DefaultJSONEncoder()); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates/BUILD.bazel new file mode 100644 index 0000000000000..7a718dd42d781 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["certificates.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/certificates", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates/certificates.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates/certificates.go index 52d53f13bb4d4..fabc8d60da7a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/certificates/certificates.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package certificates import ( "fmt" @@ -30,18 +30,18 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" certificatesv1beta1client "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) func NewCmdCertificate(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { cmd := &cobra.Command{ - Use: "certificate SUBCOMMAND", + Use: "certificate SUBCOMMAND", DisableFlagsInUseLine: true, - Short: i18n.T("Modify certificate resources."), - Long: "Modify certificate resources.", + Short: i18n.T("Modify certificate resources."), + Long: "Modify certificate resources.", Run: func(cmd *cobra.Command, args []string) { cmd.Help() }, @@ -108,9 +108,9 @@ func NewCmdCertificateApprove(f cmdutil.Factory, ioStreams genericclioptions.IOS IOStreams: ioStreams, } cmd := &cobra.Command{ - Use: "approve (-f FILENAME | NAME)", + Use: "approve (-f FILENAME | NAME)", DisableFlagsInUseLine: true, - Short: i18n.T("Approve a certificate signing request"), + Short: i18n.T("Approve a certificate signing request"), Long: templates.LongDesc(` Approve a certificate signing request. @@ -165,9 +165,9 @@ func NewCmdCertificateDeny(f cmdutil.Factory, ioStreams genericclioptions.IOStre IOStreams: ioStreams, } cmd := &cobra.Command{ - Use: "deny (-f FILENAME | NAME)", + Use: "deny (-f FILENAME | NAME)", DisableFlagsInUseLine: true, - Short: i18n.T("Deny a certificate signing request"), + Short: i18n.T("Deny a certificate signing request"), Long: templates.LongDesc(` Deny a certificate signing request. @@ -245,7 +245,7 @@ func (options *CertificateOptions) modifyCertificateCondition(builder *resource. 
} found++ - return options.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), options.Out) + return options.PrintObj(info.Object, options.Out) }) if found == 0 { fmt.Fprintf(options.Out, "No resources found\n") diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo/BUILD.bazel new file mode 100644 index 0000000000000..ff498000b5ac3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "clusterinfo.go", + "clusterinfo_dump.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/daviddengcn/go-colortext:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo/clusterinfo.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo/clusterinfo.go index 4ce0cce40836a..833c81936ca73 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo/clusterinfo.go @@ -14,23 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package clusterinfo import ( "fmt" "io" "strconv" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api/legacyscheme" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ct "github.com/daviddengcn/go-colortext" "github.com/spf13/cobra" @@ -94,7 +94,7 @@ func (o *ClusterInfoOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) err func (o *ClusterInfoOptions) Run() error { // TODO use generalized labels once they are implemented (#341) b := o.Builder. - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). NamespaceParam(o.Namespace).DefaultNamespace(). LabelSelectorParam("kubernetes.io/cluster-service=true"). ResourceTypeOrNameArgs(false, []string{"services"}...). @@ -105,7 +105,7 @@ func (o *ClusterInfoOptions) Run() error { } printService(o.Out, "Kubernetes master", o.Client.Host) - services := r.Object.(*api.ServiceList).Items + services := r.Object.(*corev1.ServiceList).Items for _, service := range services { var link string if len(service.Status.LoadBalancer.Ingress) > 0 { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo_dump.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo/clusterinfo_dump.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo_dump.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo/clusterinfo_dump.go index 741ab6ecc8985..c79e2a972f814 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo_dump.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo/clusterinfo_dump.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
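The clusterinfo hunk above decodes into the external corev1 types instead of the internal pkg/apis/core ones, which is what the new type assertion relies on. A small illustration of that shape using only published k8s.io/api types (the data is made up):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// After the change, the builder result is the versioned *corev1.ServiceList,
	// so the assertion and the field accesses use external API types.
	list := &corev1.ServiceList{
		Items: []corev1.Service{
			{Status: corev1.ServiceStatus{LoadBalancer: corev1.LoadBalancerStatus{
				Ingress: []corev1.LoadBalancerIngress{{IP: "203.0.113.10"}}, // example address
			}}},
		},
	}
	var obj interface{} = list
	services := obj.(*corev1.ServiceList).Items
	for _, svc := range services {
		if len(svc.Status.LoadBalancer.Ingress) > 0 {
			fmt.Println("ingress:", svc.Status.LoadBalancer.Ingress[0].IP)
		}
	}
}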
*/ -package cmd +package clusterinfo import ( "fmt" @@ -31,11 +31,16 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/printers" appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" +) + +const ( + defaultPodLogsTimeout = 20 * time.Second + timeout = 5 * time.Minute ) type ClusterInfoDumpOptions struct { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cmd.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cmd.go index b15dcc53b1f33..9284f6e2c379a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cmd.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cmd.go @@ -28,19 +28,50 @@ import ( "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/api/meta" utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/pkg/kubectl/cmd/annotate" + "k8s.io/kubernetes/pkg/kubectl/cmd/apiresources" + "k8s.io/kubernetes/pkg/kubectl/cmd/apply" + "k8s.io/kubernetes/pkg/kubectl/cmd/attach" "k8s.io/kubernetes/pkg/kubectl/cmd/auth" + "k8s.io/kubernetes/pkg/kubectl/cmd/autoscale" + "k8s.io/kubernetes/pkg/kubectl/cmd/certificates" + "k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo" + "k8s.io/kubernetes/pkg/kubectl/cmd/completion" cmdconfig "k8s.io/kubernetes/pkg/kubectl/cmd/config" + "k8s.io/kubernetes/pkg/kubectl/cmd/convert" + "k8s.io/kubernetes/pkg/kubectl/cmd/cp" "k8s.io/kubernetes/pkg/kubectl/cmd/create" + "k8s.io/kubernetes/pkg/kubectl/cmd/delete" + "k8s.io/kubernetes/pkg/kubectl/cmd/describe" + "k8s.io/kubernetes/pkg/kubectl/cmd/diff" + "k8s.io/kubernetes/pkg/kubectl/cmd/drain" + "k8s.io/kubernetes/pkg/kubectl/cmd/edit" + cmdexec "k8s.io/kubernetes/pkg/kubectl/cmd/exec" + "k8s.io/kubernetes/pkg/kubectl/cmd/explain" + "k8s.io/kubernetes/pkg/kubectl/cmd/expose" "k8s.io/kubernetes/pkg/kubectl/cmd/get" + "k8s.io/kubernetes/pkg/kubectl/cmd/label" + "k8s.io/kubernetes/pkg/kubectl/cmd/logs" + "k8s.io/kubernetes/pkg/kubectl/cmd/options" + "k8s.io/kubernetes/pkg/kubectl/cmd/patch" + "k8s.io/kubernetes/pkg/kubectl/cmd/plugin" + "k8s.io/kubernetes/pkg/kubectl/cmd/portforward" + "k8s.io/kubernetes/pkg/kubectl/cmd/proxy" + "k8s.io/kubernetes/pkg/kubectl/cmd/replace" + "k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate" "k8s.io/kubernetes/pkg/kubectl/cmd/rollout" + "k8s.io/kubernetes/pkg/kubectl/cmd/run" + "k8s.io/kubernetes/pkg/kubectl/cmd/scale" "k8s.io/kubernetes/pkg/kubectl/cmd/set" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/kubernetes/pkg/kubectl/cmd/taint" + "k8s.io/kubernetes/pkg/kubectl/cmd/top" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/cmd/version" "k8s.io/kubernetes/pkg/kubectl/cmd/wait" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/cli-runtime/pkg/genericclioptions" ) @@ -253,7 +284,7 @@ __custom_func() { ) var ( - bash_completion_flags = map[string]string{ + bashCompletionFlags = map[string]string{ "namespace": "__kubectl_get_resource_namespace", "context": "__kubectl_config_get_contexts", "cluster": "__kubectl_config_get_clusters", @@ -261,10 +292,12 @@ var ( } ) +// NewDefaultKubectlCommand creates the `kubectl` command with default arguments func NewDefaultKubectlCommand() *cobra.Command { return 
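The large cmd.go import block above is the core of this restructuring: each kubectl command now lives in its own package under pkg/kubectl/cmd/<name> and exposes a NewCmdX constructor that the root command composes. A reduced, single-file sketch of that layout; the package boundaries are only indicated by comments and the constructors are simplified stand-ins:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/spf13/cobra"
)

// In the real tree this would live in its own package (e.g. package clusterinfo,
// exporting NewCmdClusterInfo(f cmdutil.Factory, streams genericclioptions.IOStreams)).
func newCmdClusterInfo(out io.Writer) *cobra.Command {
	return &cobra.Command{
		Use:   "cluster-info",
		Short: "Display cluster info",
		Run:   func(*cobra.Command, []string) { fmt.Fprintln(out, "Kubernetes master is running") },
	}
}

// Likewise package version in the real layout.
func newCmdVersion(out io.Writer) *cobra.Command {
	return &cobra.Command{
		Use:   "version",
		Short: "Print the client version",
		Run:   func(*cobra.Command, []string) { fmt.Fprintln(out, "demo v0.0.0") },
	}
}

func main() {
	// The root command only composes constructors exported by subpackages,
	// which keeps each command independently buildable and vendorable.
	root := &cobra.Command{Use: "kubectl-demo"}
	root.AddCommand(newCmdClusterInfo(os.Stdout), newCmdVersion(os.Stdout))
	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}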
NewDefaultKubectlCommandWithArgs(&defaultPluginHandler{}, os.Args, os.Stdin, os.Stdout, os.Stderr) } +// NewDefaultKubectlCommandWithArgs creates the `kubectl` command with arguments func NewDefaultKubectlCommandWithArgs(pluginHandler PluginHandler, args []string, in io.Reader, out, errout io.Writer) *cobra.Command { cmd := NewKubectlCommand(in, out, errout) @@ -370,6 +403,14 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { Find more information at: https://kubernetes.io/docs/reference/kubectl/overview/`), Run: runHelp, + // Hook before and after Run initialize and write profiles to disk, + // respectively. + PersistentPreRunE: func(*cobra.Command, []string) error { + return initProfiling() + }, + PersistentPostRunE: func(*cobra.Command, []string) error { + return flushProfiling() + }, BashCompletionFunction: bashCompletionFunc, } @@ -380,7 +421,9 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { // a.k.a. change all "_" to "-". e.g. glog package flags.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) - kubeConfigFlags := genericclioptions.NewConfigFlags() + addProfilingFlags(flags) + + kubeConfigFlags := genericclioptions.NewConfigFlags().WithDeprecatedPasswordFlag() kubeConfigFlags.AddFlags(flags) matchVersionKubeConfigFlags := cmdutil.NewMatchVersionFlags(kubeConfigFlags) matchVersionKubeConfigFlags.AddFlags(cmds.PersistentFlags()) @@ -406,71 +449,71 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { Message: "Basic Commands (Beginner):", Commands: []*cobra.Command{ create.NewCmdCreate(f, ioStreams), - NewCmdExposeService(f, ioStreams), - NewCmdRun(f, ioStreams), + expose.NewCmdExposeService(f, ioStreams), + run.NewCmdRun(f, ioStreams), set.NewCmdSet(f, ioStreams), - deprecatedAlias("run-container", NewCmdRun(f, ioStreams)), }, }, { Message: "Basic Commands (Intermediate):", Commands: []*cobra.Command{ - NewCmdExplain("kubectl", f, ioStreams), + explain.NewCmdExplain("kubectl", f, ioStreams), get.NewCmdGet("kubectl", f, ioStreams), - NewCmdEdit(f, ioStreams), - NewCmdDelete(f, ioStreams), + edit.NewCmdEdit(f, ioStreams), + delete.NewCmdDelete(f, ioStreams), }, }, { Message: "Deploy Commands:", Commands: []*cobra.Command{ rollout.NewCmdRollout(f, ioStreams), - NewCmdRollingUpdate(f, ioStreams), - NewCmdScale(f, ioStreams), - NewCmdAutoscale(f, ioStreams), + rollingupdate.NewCmdRollingUpdate(f, ioStreams), + scale.NewCmdScale(f, ioStreams), + autoscale.NewCmdAutoscale(f, ioStreams), }, }, { Message: "Cluster Management Commands:", Commands: []*cobra.Command{ - NewCmdCertificate(f, ioStreams), - NewCmdClusterInfo(f, ioStreams), - NewCmdTop(f, ioStreams), - NewCmdCordon(f, ioStreams), - NewCmdUncordon(f, ioStreams), - NewCmdDrain(f, ioStreams), - NewCmdTaint(f, ioStreams), + certificates.NewCmdCertificate(f, ioStreams), + clusterinfo.NewCmdClusterInfo(f, ioStreams), + top.NewCmdTop(f, ioStreams), + drain.NewCmdCordon(f, ioStreams), + drain.NewCmdUncordon(f, ioStreams), + drain.NewCmdDrain(f, ioStreams), + taint.NewCmdTaint(f, ioStreams), }, }, { Message: "Troubleshooting and Debugging Commands:", Commands: []*cobra.Command{ - NewCmdDescribe("kubectl", f, ioStreams), - NewCmdLogs(f, ioStreams), - NewCmdAttach(f, ioStreams), - NewCmdExec(f, ioStreams), - NewCmdPortForward(f, ioStreams), - NewCmdProxy(f, ioStreams), - NewCmdCp(f, ioStreams), + describe.NewCmdDescribe("kubectl", f, ioStreams), + logs.NewCmdLogs(f, ioStreams), + attach.NewCmdAttach(f, ioStreams), + cmdexec.NewCmdExec(f, ioStreams), + 
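The cmd.go hunk above wires profiling through cobra's PersistentPreRunE/PersistentPostRunE hooks so every subcommand inherits it without per-command changes. A sketch of that shape with stand-in initProfiling/flushProfiling helpers and a hypothetical --cpu-profile flag (the real helpers and flags live elsewhere in the kubectl tree):

package main

import (
	"fmt"
	"os"
	"runtime/pprof"

	"github.com/spf13/cobra"
)

var (
	cpuProfilePath string   // stand-in for kubectl's real profiling flags
	cpuProfileFile *os.File
)

func initProfiling() error {
	if cpuProfilePath == "" {
		return nil
	}
	f, err := os.Create(cpuProfilePath)
	if err != nil {
		return err
	}
	cpuProfileFile = f
	return pprof.StartCPUProfile(f)
}

func flushProfiling() error {
	if cpuProfileFile == nil {
		return nil
	}
	pprof.StopCPUProfile()
	return cpuProfileFile.Close()
}

func main() {
	root := &cobra.Command{
		Use: "demo",
		// The hooks run around every subcommand's Run, mirroring the hunk above.
		PersistentPreRunE:  func(*cobra.Command, []string) error { return initProfiling() },
		PersistentPostRunE: func(*cobra.Command, []string) error { return flushProfiling() },
		Run:                func(*cobra.Command, []string) { fmt.Println("doing work") },
	}
	root.PersistentFlags().StringVar(&cpuProfilePath, "cpu-profile", "", "write a CPU profile to this file")
	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}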
portforward.NewCmdPortForward(f, ioStreams), + proxy.NewCmdProxy(f, ioStreams), + cp.NewCmdCp(f, ioStreams), auth.NewCmdAuth(f, ioStreams), }, }, { Message: "Advanced Commands:", Commands: []*cobra.Command{ - NewCmdApply("kubectl", f, ioStreams), - NewCmdPatch(f, ioStreams), - NewCmdReplace(f, ioStreams), + diff.NewCmdDiff(f, ioStreams), + apply.NewCmdApply("kubectl", f, ioStreams), + patch.NewCmdPatch(f, ioStreams), + replace.NewCmdReplace(f, ioStreams), wait.NewCmdWait(f, ioStreams), - NewCmdConvert(f, ioStreams), + convert.NewCmdConvert(f, ioStreams), }, }, { Message: "Settings Commands:", Commands: []*cobra.Command{ - NewCmdLabel(f, ioStreams), - NewCmdAnnotate("kubectl", f, ioStreams), - NewCmdCompletion(ioStreams.Out, ""), + label.NewCmdLabel(f, ioStreams), + annotate.NewCmdAnnotate("kubectl", f, ioStreams), + completion.NewCmdCompletion(ioStreams.Out, ""), }, }, } @@ -486,7 +529,7 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { templates.ActsAsRootCommand(cmds, filters, groups...) - for name, completion := range bash_completion_flags { + for name, completion := range bashCompletionFlags { if cmds.Flag(name) != nil { if cmds.Flag(name).Annotations == nil { cmds.Flag(name).Annotations = map[string][]string{} @@ -500,11 +543,11 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { cmds.AddCommand(alpha) cmds.AddCommand(cmdconfig.NewCmdConfig(f, clientcmd.NewDefaultPathOptions(), ioStreams)) - cmds.AddCommand(NewCmdPlugin(f, ioStreams)) - cmds.AddCommand(NewCmdVersion(f, ioStreams)) - cmds.AddCommand(NewCmdApiVersions(f, ioStreams)) - cmds.AddCommand(NewCmdApiResources(f, ioStreams)) - cmds.AddCommand(NewCmdOptions(ioStreams.Out)) + cmds.AddCommand(plugin.NewCmdPlugin(f, ioStreams)) + cmds.AddCommand(version.NewCmdVersion(f, ioStreams)) + cmds.AddCommand(apiresources.NewCmdAPIVersions(f, ioStreams)) + cmds.AddCommand(apiresources.NewCmdAPIResources(f, ioStreams)) + cmds.AddCommand(options.NewCmdOptions(ioStreams.Out)) return cmds } @@ -513,10 +556,6 @@ func runHelp(cmd *cobra.Command, args []string) { cmd.Help() } -func printDeprecationWarning(errOut io.Writer, command, alias string) { - fmt.Fprintf(errOut, "%s is DEPRECATED and will be removed in a future version. Use %s instead.\n", alias, command) -} - // deprecatedAlias is intended to be used to create a "wrapper" command around // an existing command. The wrapper works the same but prints a deprecation // message before running. This command is identical functionality. 
@@ -531,5 +570,3 @@ func deprecatedAlias(deprecatedVersion string, cmd *cobra.Command) *cobra.Comman cmd.Hidden = true return cmd } - -var metadataAccessor = meta.NewAccessor() diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion/BUILD.bazel new file mode 100644 index 0000000000000..84760713e4f70 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["completion.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/completion", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion/completion.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion/completion.go index 4bafc66fea780..cd8fd36e0f121 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/completion/completion.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package completion import ( "bytes" @@ -22,9 +22,9 @@ import ( "github.com/spf13/cobra" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) const defaultBoilerPlate = ` @@ -99,11 +99,11 @@ func NewCmdCompletion(out io.Writer, boilerPlate string) *cobra.Command { } cmd := &cobra.Command{ - Use: "completion SHELL", + Use: "completion SHELL", DisableFlagsInUseLine: true, - Short: i18n.T("Output shell completion code for the specified shell (bash or zsh)"), - Long: completion_long, - Example: completion_example, + Short: i18n.T("Output shell completion code for the specified shell (bash or zsh)"), + Long: completion_long, + Example: completion_example, Run: func(cmd *cobra.Command, args []string) { err := RunCompletion(out, boilerPlate, cmd, args) cmdutil.CheckErr(err) @@ -264,7 +264,7 @@ __kubectl_quote() { # Leave out first character printf %q "${1:1}" else - printf %q "$1" + printf %q "$1" fi } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/BUILD.bazel index 6b8a75db8d2e6..9775f029f0f21 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/BUILD.bazel @@ -28,13 +28,14 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", 
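The deprecatedAlias helper kept at the end of cmd.go above re-registers a command under its old name, hides it from help, and warns before running; its full body is not shown in this hunk, so the version below is a hedged reconstruction (the warning text comes from the removed printDeprecationWarning, and "run-container" is the alias dropped from the Basic Commands group):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// deprecatedAlias wraps an existing command under deprecatedName, hidden from
// help, printing a deprecation warning before delegating to the original Run.
func deprecatedAlias(deprecatedName string, cmd *cobra.Command) *cobra.Command {
	originalName := cmd.Name()
	originalRun := cmd.Run
	cmd.Run = func(c *cobra.Command, args []string) {
		fmt.Fprintf(os.Stderr, "%s is DEPRECATED and will be removed in a future version. Use %s instead.\n",
			deprecatedName, originalName)
		originalRun(c, args)
	}
	cmd.Use = deprecatedName
	cmd.Hidden = true
	return cmd
}

func main() {
	newRun := func() *cobra.Command {
		return &cobra.Command{Use: "run", Run: func(*cobra.Command, []string) { fmt.Println("running") }}
	}
	root := &cobra.Command{Use: "demo"}
	root.AddCommand(newRun())
	root.AddCommand(deprecatedAlias("run-container", newRun()))
	root.Execute()
}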
"//vendor/k8s.io/client-go/tools/clientcmd/api/latest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/printers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/printers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/config.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/config.go index 720f59f359956..4e1b63e623f79 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/config.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/config.go @@ -25,9 +25,9 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // NewCmdConfig creates a command object for the "config" action, and adds all child commands to it. @@ -37,9 +37,9 @@ func NewCmdConfig(f cmdutil.Factory, pathOptions *clientcmd.PathOptions, streams } cmd := &cobra.Command{ - Use: "config SUBCOMMAND", + Use: "config SUBCOMMAND", DisableFlagsInUseLine: true, - Short: i18n.T("Modify kubeconfig files"), + Short: i18n.T("Modify kubeconfig files"), Long: templates.LongDesc(` Modify kubeconfig files using subcommands like "kubectl config set current-context my-context" diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_authinfo.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_authinfo.go index 368d47c0fb339..0dca066c1fdff 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_authinfo.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_authinfo.go @@ -29,9 +29,9 @@ import ( "k8s.io/apiserver/pkg/util/flag" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) type createAuthInfoOptions struct { @@ -100,11 +100,11 @@ func NewCmdConfigSetAuthInfo(out io.Writer, configAccess clientcmd.ConfigAccess) func newCmdConfigSetAuthInfo(out io.Writer, options *createAuthInfoOptions) *cobra.Command { cmd := &cobra.Command{ - Use: fmt.Sprintf("set-credentials NAME [--%v=path/to/certfile] [--%v=path/to/keyfile] [--%v=bearer_token] [--%v=basic_user] [--%v=basic_password] [--%v=provider_name] [--%v=key=value]", clientcmd.FlagCertFile, clientcmd.FlagKeyFile, clientcmd.FlagBearerToken, clientcmd.FlagUsername, clientcmd.FlagPassword, flagAuthProvider, flagAuthProviderArg), + Use: fmt.Sprintf("set-credentials NAME [--%v=path/to/certfile] [--%v=path/to/keyfile] [--%v=bearer_token] [--%v=basic_user] [--%v=basic_password] [--%v=provider_name] [--%v=key=value]", clientcmd.FlagCertFile, clientcmd.FlagKeyFile, clientcmd.FlagBearerToken, clientcmd.FlagUsername, clientcmd.FlagPassword, flagAuthProvider, flagAuthProviderArg), DisableFlagsInUseLine: true, - Short: i18n.T("Sets a user entry in kubeconfig"), - Long: create_authinfo_long, - Example: create_authinfo_example, + 
Short: i18n.T("Sets a user entry in kubeconfig"), + Long: create_authinfo_long, + Example: create_authinfo_example, Run: func(cmd *cobra.Command, args []string) { err := options.complete(cmd, out) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_cluster.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_cluster.go index f1f2d091bca86..a07dfd6c7726f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_cluster.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_cluster.go @@ -28,9 +28,9 @@ import ( "k8s.io/apiserver/pkg/util/flag" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) type createClusterOptions struct { @@ -63,11 +63,11 @@ func NewCmdConfigSetCluster(out io.Writer, configAccess clientcmd.ConfigAccess) options := &createClusterOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: fmt.Sprintf("set-cluster NAME [--%v=server] [--%v=path/to/certificate/authority] [--%v=true]", clientcmd.FlagAPIServer, clientcmd.FlagCAFile, clientcmd.FlagInsecure), + Use: fmt.Sprintf("set-cluster NAME [--%v=server] [--%v=path/to/certificate/authority] [--%v=true]", clientcmd.FlagAPIServer, clientcmd.FlagCAFile, clientcmd.FlagInsecure), DisableFlagsInUseLine: true, - Short: i18n.T("Sets a cluster entry in kubeconfig"), - Long: create_cluster_long, - Example: create_cluster_example, + Short: i18n.T("Sets a cluster entry in kubeconfig"), + Long: create_cluster_long, + Example: create_cluster_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.complete(cmd)) cmdutil.CheckErr(options.run()) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_context.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_context.go index 46e30df99d934..50b965cc629a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_context.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_context.go @@ -26,9 +26,9 @@ import ( "k8s.io/apiserver/pkg/util/flag" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) type createContextOptions struct { @@ -55,11 +55,11 @@ func NewCmdConfigSetContext(out io.Writer, configAccess clientcmd.ConfigAccess) options := &createContextOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: fmt.Sprintf("set-context [NAME | --current] [--%v=cluster_nickname] [--%v=user_nickname] [--%v=namespace]", clientcmd.FlagClusterName, clientcmd.FlagAuthInfoName, clientcmd.FlagNamespace), + Use: fmt.Sprintf("set-context [NAME | --current] [--%v=cluster_nickname] [--%v=user_nickname] [--%v=namespace]", clientcmd.FlagClusterName, clientcmd.FlagAuthInfoName, clientcmd.FlagNamespace), DisableFlagsInUseLine: true, - Short: i18n.T("Sets a context entry in kubeconfig"), - Long: create_context_long, - Example: create_context_example, + Short: i18n.T("Sets a context entry in kubeconfig"), + Long: create_context_long, + Example: create_context_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.complete(cmd)) name, exists, err := options.run() @@ -142,10 +142,10 @@ func (o 
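The config subcommands touched above (set-cluster, set-credentials, set-context, use-context) all edit kubeconfig entries through client-go's clientcmd package. A minimal sketch of the underlying data model, writing a throwaway file (demo-kubeconfig.yaml is a hypothetical path, used so the real ~/.kube/config is untouched):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg := clientcmdapi.NewConfig()

	// Roughly `kubectl config set-cluster demo --server=...`
	cluster := clientcmdapi.NewCluster()
	cluster.Server = "https://203.0.113.10:6443" // example server address
	cfg.Clusters["demo"] = cluster

	// Roughly `kubectl config set-credentials demo-user --token=...`
	user := clientcmdapi.NewAuthInfo()
	user.Token = "example-token"
	cfg.AuthInfos["demo-user"] = user

	// Roughly `set-context demo --cluster=demo --user=demo-user` plus `use-context demo`
	ctx := clientcmdapi.NewContext()
	ctx.Cluster, ctx.AuthInfo = "demo", "demo-user"
	cfg.Contexts["demo"] = ctx
	cfg.CurrentContext = "demo"

	if err := clientcmd.WriteToFile(*cfg, "demo-kubeconfig.yaml"); err != nil {
		panic(err)
	}
	back, err := clientcmd.LoadFromFile("demo-kubeconfig.yaml")
	if err != nil {
		panic(err)
	}
	fmt.Println("current-context:", back.CurrentContext)
}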
*createContextOptions) complete(cmd *cobra.Command) error { func (o createContextOptions) validate() error { if len(o.name) == 0 && !o.currContext { - return errors.New("you must specify a non-empty context name or --current-context") + return errors.New("you must specify a non-empty context name or --current") } if len(o.name) > 0 && o.currContext { - return errors.New("you cannot specify a context name and --current-context") + return errors.New("you cannot specify both a context name and --current") } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context.go index da47c14af97fa..3751d8b25c921 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context.go @@ -23,9 +23,9 @@ import ( "github.com/spf13/cobra" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) type CurrentContextOptions struct { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/delete_cluster.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/delete_cluster.go index a9e00ce8348e4..9ae52e87c11a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/delete_cluster.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/delete_cluster.go @@ -22,9 +22,9 @@ import ( "github.com/spf13/cobra" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -35,11 +35,11 @@ var ( func NewCmdConfigDeleteCluster(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { cmd := &cobra.Command{ - Use: "delete-cluster NAME", + Use: "delete-cluster NAME", DisableFlagsInUseLine: true, - Short: i18n.T("Delete the specified cluster from the kubeconfig"), - Long: "Delete the specified cluster from the kubeconfig", - Example: delete_cluster_example, + Short: i18n.T("Delete the specified cluster from the kubeconfig"), + Long: "Delete the specified cluster from the kubeconfig", + Example: delete_cluster_example, Run: func(cmd *cobra.Command, args []string) { err := runDeleteCluster(out, configAccess, cmd) cmdutil.CheckErr(err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/delete_context.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/delete_context.go index d6dad310221da..76ae529fd7e4a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/delete_context.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/delete_context.go @@ -22,9 +22,9 @@ import ( "github.com/spf13/cobra" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -35,11 +35,11 @@ var ( func NewCmdConfigDeleteContext(out, errOut io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { cmd := &cobra.Command{ - Use: "delete-context NAME", + Use: "delete-context NAME", DisableFlagsInUseLine: true, - Short: i18n.T("Delete the specified context from the kubeconfig"), - Long: "Delete the specified context from the kubeconfig", - Example: delete_context_example, + Short: i18n.T("Delete the 
specified context from the kubeconfig"), + Long: "Delete the specified context from the kubeconfig", + Example: delete_context_example, Run: func(cmd *cobra.Command, args []string) { err := runDeleteContext(out, errOut, configAccess, cmd) cmdutil.CheckErr(err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/get_clusters.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/get_clusters.go index c19450f241b3f..bbaebf9e7a710 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/get_clusters.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/get_clusters.go @@ -22,9 +22,9 @@ import ( "github.com/spf13/cobra" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/get_contexts.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/get_contexts.go index 30ccba502db4b..30f53db794504 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/get_contexts.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/get_contexts.go @@ -30,10 +30,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" + "k8s.io/kubernetes/pkg/kubectl/util/printers" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // GetContextsOptions contains the assignable options from the args. @@ -67,11 +67,11 @@ func NewCmdConfigGetContexts(streams genericclioptions.IOStreams, configAccess c } cmd := &cobra.Command{ - Use: "get-contexts [(-o|--output=)name)]", + Use: "get-contexts [(-o|--output=)name)]", DisableFlagsInUseLine: true, - Short: i18n.T("Describe one or many contexts"), - Long: getContextsLong, - Example: getContextsExample, + Short: i18n.T("Describe one or many contexts"), + Long: getContextsLong, + Example: getContextsExample, Run: func(cmd *cobra.Command, args []string) { validOutputTypes := sets.NewString("", "json", "yaml", "wide", "name", "custom-columns", "custom-columns-file", "go-template", "go-template-file", "jsonpath", "jsonpath-file") supportedOutputTypes := sets.NewString("", "name") diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/rename_context.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/rename_context.go index b5fc2e728de10..535a811102e3f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/rename_context.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/rename_context.go @@ -24,8 +24,8 @@ import ( "github.com/spf13/cobra" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // RenameContextOptions contains the options for running the rename-context cli command. 
@@ -61,11 +61,11 @@ func NewCmdConfigRenameContext(out io.Writer, configAccess clientcmd.ConfigAcces options := &RenameContextOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: renameContextUse, + Use: renameContextUse, DisableFlagsInUseLine: true, - Short: renameContextShort, - Long: renameContextLong, - Example: renameContextExample, + Short: renameContextShort, + Long: renameContextLong, + Example: renameContextExample, Run: func(cmd *cobra.Command, args []string) { if err := options.Complete(cmd, args, out); err != nil { cmdutil.CheckErr(err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/set.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/set.go index 184a987fc3f27..9117402bba17f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/set.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/set.go @@ -28,9 +28,9 @@ import ( "k8s.io/apiserver/pkg/util/flag" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) type setOptions struct { @@ -51,10 +51,10 @@ func NewCmdConfigSet(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra. options := &setOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: "set PROPERTY_NAME PROPERTY_VALUE", + Use: "set PROPERTY_NAME PROPERTY_VALUE", DisableFlagsInUseLine: true, - Short: i18n.T("Sets an individual value in a kubeconfig file"), - Long: set_long, + Short: i18n.T("Sets an individual value in a kubeconfig file"), + Long: set_long, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.complete(cmd)) cmdutil.CheckErr(options.run()) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/unset.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/unset.go index 18345cd491419..02a519cb42c89 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/unset.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/unset.go @@ -23,7 +23,7 @@ import ( "reflect" "github.com/spf13/cobra" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/client-go/tools/clientcmd" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -53,11 +53,11 @@ func NewCmdConfigUnset(out io.Writer, configAccess clientcmd.ConfigAccess) *cobr options := &unsetOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: "unset PROPERTY_NAME", + Use: "unset PROPERTY_NAME", DisableFlagsInUseLine: true, - Short: i18n.T("Unsets an individual value in a kubeconfig file"), - Long: unsetLong, - Example: unsetExample, + Short: i18n.T("Unsets an individual value in a kubeconfig file"), + Long: unsetLong, + Example: unsetExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.complete(cmd, args)) cmdutil.CheckErr(options.run(out)) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/use_context.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/use_context.go index 747837e093912..b22251af261b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/use_context.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/use_context.go @@ -25,9 +25,9 @@ import ( "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -45,12 
+45,12 @@ func NewCmdConfigUseContext(out io.Writer, configAccess clientcmd.ConfigAccess) options := &useContextOptions{configAccess: configAccess} cmd := &cobra.Command{ - Use: "use-context CONTEXT_NAME", + Use: "use-context CONTEXT_NAME", DisableFlagsInUseLine: true, - Short: i18n.T("Sets the current-context in a kubeconfig file"), - Aliases: []string{"use"}, - Long: `Sets the current-context in a kubeconfig file`, - Example: use_context_example, + Short: i18n.T("Sets the current-context in a kubeconfig file"), + Aliases: []string{"use"}, + Long: `Sets the current-context in a kubeconfig file`, + Example: use_context_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.complete(cmd)) cmdutil.CheckErr(options.run()) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/view.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/view.go index 7915fd95ff493..7017cb6a9cf87 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/view.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/config/view.go @@ -23,14 +23,14 @@ import ( "k8s.io/apiserver/pkg/util/flag" "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/tools/clientcmd/api/latest" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) type ViewOptions struct { @@ -70,7 +70,7 @@ var ( func NewCmdConfigView(f cmdutil.Factory, streams genericclioptions.IOStreams, ConfigAccess clientcmd.ConfigAccess) *cobra.Command { o := &ViewOptions{ - PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(legacyscheme.Scheme).WithDefaultOutput("yaml"), + PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme).WithDefaultOutput("yaml"), ConfigAccess: ConfigAccess, IOStreams: streams, @@ -82,7 +82,7 @@ func NewCmdConfigView(f cmdutil.Factory, streams genericclioptions.IOStreams, Co Long: view_long, Example: view_example, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(o.Complete(cmd)) + cmdutil.CheckErr(o.Complete(cmd, args)) cmdutil.CheckErr(o.Validate()) cmdutil.CheckErr(o.Run()) }, @@ -99,7 +99,10 @@ func NewCmdConfigView(f cmdutil.Factory, streams genericclioptions.IOStreams, Co return cmd } -func (o *ViewOptions) Complete(cmd *cobra.Command) error { +func (o *ViewOptions) Complete(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return cmdutil.UsageErrorf(cmd, "unexpected arguments: %v", args) + } if o.ConfigAccess.IsExplicitFile() { if !o.Merge.Provided() { o.Merge.Set("false") diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert/BUILD.bazel similarity index 58% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD.bazel rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert/BUILD.bazel index 377fdd5697fb2..b2d35bfe2bdc5 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert/BUILD.bazel @@ -3,18 +3,22 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", 
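The view.go hunk above keeps the PrintFlags wiring (default YAML output, type setter from a scheme) but points it at the kubectl scheme instead of legacyscheme. A hedged sketch of that wiring using the public client-go scheme and a ConfigMap; package paths for cli-runtime printers vary by release:

package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/client-go/kubernetes/scheme"
)

func main() {
	// Default to YAML and let the type setter fill apiVersion/kind from the scheme.
	printFlags := genericclioptions.NewPrintFlags("").
		WithTypeSetter(scheme.Scheme).
		WithDefaultOutput("yaml")

	printer, err := printFlags.ToPrinter()
	if err != nil {
		panic(err)
	}

	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Data:       map[string]string{"key": "value"},
	}
	if err := printer.PrintObj(cm, os.Stdout); err != nil {
		panic(err)
	}
}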
srcs = [ - "doc.go", - "register.go", + "convert.go", + "import_known_versions.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/convert", visibility = ["//visibility:public"], deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/apps/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/authentication/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/authorization/install:go_default_library", @@ -22,14 +26,18 @@ go_library( "//vendor/k8s.io/kubernetes/pkg/apis/batch/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/certificates/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/coordination/install:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/events/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/extensions/install:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/networking/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/policy/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/rbac/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/scheduling/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/settings/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/storage/install:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/validation:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert/convert.go similarity index 87% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert/convert.go index 3d1302dc3614b..a233b8cd1cbbb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert/convert.go @@ -14,14 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package convert import ( "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/cli-runtime/pkg/genericclioptions" @@ -29,9 +30,9 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/resource" scheme "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/kubernetes/pkg/kubectl/validation" ) @@ -89,11 +90,11 @@ func NewCmdConvert(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *co o := NewConvertOptions(ioStreams) cmd := &cobra.Command{ - Use: "convert -f FILENAME", + Use: "convert -f FILENAME", DisableFlagsInUseLine: true, - Short: i18n.T("Convert config files between different API versions"), - Long: convert_long, - Example: convert_example, + Short: i18n.T("Convert config files between different API versions"), + Long: convert_long, + Example: convert_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd)) cmdutil.CheckErr(o.RunConvert()) @@ -133,6 +134,14 @@ func (o *ConvertOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) (err er // RunConvert implements the generic Convert command func (o *ConvertOptions) RunConvert() error { + + // Convert must be removed from kubectl, since kubectl can not depend on + // Kubernetes "internal" dependencies. These "internal" dependencies can + // not be removed from convert. Another way to convert a resource is to + // "kubectl apply" it to the cluster, then "kubectl get" at the desired version. + // Another possible solution is to make convert a plugin. + fmt.Fprintf(o.ErrOut, "kubectl convert is DEPRECATED and will be removed in a future version.\nIn order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.\n") + b := o.builder(). WithScheme(scheme.Scheme). LocalParam(o.local) @@ -173,7 +182,9 @@ func (o *ConvertOptions) RunConvert() error { } } - objects, err := asVersionedObject(infos, !singleItemImplied, specifiedOutputVersion, cmdutil.InternalVersionJSONEncoder()) + internalEncoder := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) + internalVersionJSONEncoder := unstructured.JSONFallbackEncoder{Encoder: internalEncoder} + objects, err := asVersionedObject(infos, !singleItemImplied, specifiedOutputVersion, internalVersionJSONEncoder) if err != nil { return err } @@ -215,7 +226,7 @@ func asVersionedObject(infos []*resource.Info, forceList bool, specifiedOutputVe if len(actualVersion.Version) > 0 { defaultVersionInfo = fmt.Sprintf("Defaulting to %q", actualVersion.Version) } - glog.V(1).Infof("info: the output version specified is invalid. %s\n", defaultVersionInfo) + klog.V(1).Infof("info: the output version specified is invalid. %s\n", defaultVersionInfo) } return object, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert/import_known_versions.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert/import_known_versions.go new file mode 100644 index 0000000000000..2da5f1aadd1c7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/convert/import_known_versions.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. 
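The convert hunk above replaces cmdutil.InternalVersionJSONEncoder with a locally built encoder: a legacy codec over all prioritized group versions wrapped in unstructured.JSONFallbackEncoder, so objects the codec cannot handle are still emitted as plain JSON. A sketch of the same wrapping, using the public client-go scheme instead of the internal legacyscheme to stay self-contained:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
)

func main() {
	// Take a legacy codec for a group version and wrap it so unrecognised
	// objects fall back to plain JSON, mirroring the shape used in RunConvert.
	legacy := scheme.Codecs.LegacyCodec(corev1.SchemeGroupVersion)
	encoder := unstructured.JSONFallbackEncoder{Encoder: legacy}

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	data, err := runtime.Encode(encoder, pod)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}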
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package convert + +// These imports are the API groups the client will support. +// TODO: Remove these manual install once we don't need legacy scheme in convert +import ( + _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/authentication/install" + _ "k8s.io/kubernetes/pkg/apis/authorization/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/certificates/install" + _ "k8s.io/kubernetes/pkg/apis/coordination/install" + _ "k8s.io/kubernetes/pkg/apis/core/install" + _ "k8s.io/kubernetes/pkg/apis/events/install" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" + _ "k8s.io/kubernetes/pkg/apis/scheduling/install" + _ "k8s.io/kubernetes/pkg/apis/settings/install" + _ "k8s.io/kubernetes/pkg/apis/storage/install" +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp/BUILD.bazel new file mode 100644 index 0000000000000..8efd5008519a0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["cp.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/cp", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/renstrom/dedent:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp/cp.go similarity index 79% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp/cp.go index e7d73d081852d..78e9f05066280 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/cp/cp.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
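import_known_versions.go above relies on Go's blank-import pattern: each `_ ".../install"` import is compiled only for its side effects, so the package's init() registers that API group with the legacy scheme convert still needs. A stdlib-only demonstration of the same mechanism (net/http/pprof registers handlers purely via its init):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	// Imported only for its side effect: its init() registers the /debug/pprof/
	// handlers on http.DefaultServeMux, just as the install packages register
	// their types with the scheme.
	_ "net/http/pprof"
)

func main() {
	req := httptest.NewRequest("GET", "/debug/pprof/", nil)
	rec := httptest.NewRecorder()
	http.DefaultServeMux.ServeHTTP(rec, req)
	fmt.Println("pprof index status:", rec.Code) // 200, registered purely by the blank import
}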
*/ -package cmd +package cp import ( "archive/tar" @@ -30,9 +30,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/kubernetes/pkg/kubectl/cmd/exec" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "bytes" @@ -66,8 +67,9 @@ var ( ) type CopyOptions struct { - Container string - Namespace string + Container string + Namespace string + NoPreserve bool ClientConfig *restclient.Config Clientset kubernetes.Interface @@ -86,17 +88,18 @@ func NewCmdCp(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.C o := NewCopyOptions(ioStreams) cmd := &cobra.Command{ - Use: "cp ", + Use: "cp ", DisableFlagsInUseLine: true, - Short: i18n.T("Copy files and directories to and from containers."), - Long: "Copy files and directories to and from containers.", - Example: cpExample, + Short: i18n.T("Copy files and directories to and from containers."), + Long: "Copy files and directories to and from containers.", + Example: cpExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd)) cmdutil.CheckErr(o.Run(args)) }, } cmd.Flags().StringVarP(&o.Container, "container", "c", o.Container, "Container name. If omitted, the first container in the pod will be chosen") + cmd.Flags().BoolVarP(&o.NoPreserve, "no-preserve", "", false, "The copied file/directory's ownership and permissions will not be preserved in the container") return cmd } @@ -178,7 +181,7 @@ func (o *CopyOptions) Run(args []string) error { if len(srcSpec.PodName) != 0 && len(destSpec.PodName) != 0 { if _, err := os.Stat(args[0]); err == nil { - return o.copyToPod(fileSpec{File: args[0]}, destSpec) + return o.copyToPod(fileSpec{File: args[0]}, destSpec, &exec.ExecOptions{}) } return fmt.Errorf("src doesn't exist in local filesystem") } @@ -187,7 +190,7 @@ func (o *CopyOptions) Run(args []string) error { return o.copyFromPod(srcSpec, destSpec) } if len(destSpec.PodName) != 0 { - return o.copyToPod(srcSpec, destSpec) + return o.copyToPod(srcSpec, destSpec, &exec.ExecOptions{}) } return fmt.Errorf("one of src or dest must be a remote file specification") } @@ -197,8 +200,8 @@ func (o *CopyOptions) Run(args []string) error { // pod. If the destination path does not exist or is _not_ a // directory, an error is returned with the exit code received. func (o *CopyOptions) checkDestinationIsDir(dest fileSpec) error { - options := &ExecOptions{ - StreamOptions: StreamOptions{ + options := &exec.ExecOptions{ + StreamOptions: exec.StreamOptions{ IOStreams: genericclioptions.IOStreams{ Out: bytes.NewBuffer([]byte{}), ErrOut: bytes.NewBuffer([]byte{}), @@ -209,13 +212,13 @@ func (o *CopyOptions) checkDestinationIsDir(dest fileSpec) error { }, Command: []string{"test", "-d", dest.File}, - Executor: &DefaultRemoteExecutor{}, + Executor: &exec.DefaultRemoteExecutor{}, } return o.execute(options) } -func (o *CopyOptions) copyToPod(src, dest fileSpec) error { +func (o *CopyOptions) copyToPod(src, dest fileSpec, options *exec.ExecOptions) error { if len(src.File) == 0 || len(dest.File) == 0 { return errFileCannotBeEmpty } @@ -237,30 +240,33 @@ func (o *CopyOptions) copyToPod(src, dest fileSpec) error { err := makeTar(src.File, dest.File, writer) cmdutil.CheckErr(err) }() + var cmdArr []string // TODO: Improve error messages by first testing if 'tar' is present in the container? 
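The cp.go hunk above adds a --no-preserve flag and builds the remote tar command accordingly before streaming the archive to the container's stdin. A small extraction of just that command construction, matching the logic in copyToPod:

package main

import (
	"fmt"
	"path"
)

// remoteUntarCommand mirrors copyToPod: extract from stdin with `tar -xf -`,
// optionally telling tar not to restore ownership/permissions, and change into
// the destination directory before extracting.
func remoteUntarCommand(destFile string, noPreserve bool) []string {
	var cmd []string
	if noPreserve {
		cmd = []string{"tar", "--no-same-permissions", "--no-same-owner", "-xf", "-"}
	} else {
		cmd = []string{"tar", "-xf", "-"}
	}
	if destDir := path.Dir(destFile); len(destDir) > 0 {
		cmd = append(cmd, "-C", destDir)
	}
	return cmd
}

func main() {
	fmt.Println(remoteUntarCommand("/tmp/app/config.yaml", true))
	// [tar --no-same-permissions --no-same-owner -xf - -C /tmp/app]
}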
- cmdArr := []string{"tar", "xf", "-"} + if o.NoPreserve { + cmdArr = []string{"tar", "--no-same-permissions", "--no-same-owner", "-xf", "-"} + } else { + cmdArr = []string{"tar", "-xf", "-"} + } destDir := path.Dir(dest.File) if len(destDir) > 0 { cmdArr = append(cmdArr, "-C", destDir) } - options := &ExecOptions{ - StreamOptions: StreamOptions{ - IOStreams: genericclioptions.IOStreams{ - In: reader, - Out: o.Out, - ErrOut: o.ErrOut, - }, - Stdin: true, - - Namespace: dest.PodNamespace, - PodName: dest.PodName, + options.StreamOptions = exec.StreamOptions{ + IOStreams: genericclioptions.IOStreams{ + In: reader, + Out: o.Out, + ErrOut: o.ErrOut, }, + Stdin: true, - Command: cmdArr, - Executor: &DefaultRemoteExecutor{}, + Namespace: dest.PodNamespace, + PodName: dest.PodName, } + + options.Command = cmdArr + options.Executor = &exec.DefaultRemoteExecutor{} return o.execute(options) } @@ -270,8 +276,8 @@ func (o *CopyOptions) copyFromPod(src, dest fileSpec) error { } reader, outStream := io.Pipe() - options := &ExecOptions{ - StreamOptions: StreamOptions{ + options := &exec.ExecOptions{ + StreamOptions: exec.StreamOptions{ IOStreams: genericclioptions.IOStreams{ In: nil, Out: outStream, @@ -284,7 +290,7 @@ func (o *CopyOptions) copyFromPod(src, dest fileSpec) error { // TODO: Improve error messages by first testing if 'tar' is present in the container? Command: []string{"tar", "cf", "-", src.File}, - Executor: &DefaultRemoteExecutor{}, + Executor: &exec.DefaultRemoteExecutor{}, } go func() { @@ -296,12 +302,24 @@ func (o *CopyOptions) copyFromPod(src, dest fileSpec) error { // remove extraneous path shortcuts - these could occur if a path contained extra "../" // and attempted to navigate beyond "/" in a remote filesystem prefix = stripPathShortcuts(prefix) - return untarAll(reader, dest.File, prefix) + return o.untarAll(reader, dest.File, prefix) } // stripPathShortcuts removes any leading or trailing "../" from a given path func stripPathShortcuts(p string) string { newPath := path.Clean(p) + trimmed := strings.TrimPrefix(newPath, "../") + + for trimmed != newPath { + newPath = trimmed + trimmed = strings.TrimPrefix(newPath, "../") + } + + // trim leftover ".." + if newPath == ".." { + newPath = "" + } + if len(newPath) > 0 && string(newPath[0]) == "/" { return newPath[1:] } @@ -389,7 +407,7 @@ func clean(fileName string) string { return path.Clean(string(os.PathSeparator) + fileName) } -func untarAll(reader io.Reader, destFile, prefix string) error { +func (o *CopyOptions) untarAll(reader io.Reader, destFile, prefix string) error { entrySeq := -1 // TODO: use compression here? 
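// The rewritten stripPathShortcuts above repeatedly trims leading "../" after
// path.Clean, so a tar prefix can no longer climb out of the destination directory.
// A minimal standalone sketch of that behaviour: the helper mirrors the function
// added in the hunk above, and the sample inputs are illustrative only.
package main

import (
	"fmt"
	"path"
	"strings"
)

func stripPathShortcuts(p string) string {
	newPath := path.Clean(p)
	trimmed := strings.TrimPrefix(newPath, "../")

	for trimmed != newPath {
		newPath = trimmed
		trimmed = strings.TrimPrefix(newPath, "../")
	}

	// trim leftover ".."
	if newPath == ".." {
		newPath = ""
	}

	if len(newPath) > 0 && string(newPath[0]) == "/" {
		return newPath[1:]
	}
	return newPath
}

func main() {
	for _, p := range []string{"../../etc/passwd", "a/../b", "/var/log", ".."} {
		fmt.Printf("%q -> %q\n", p, stripPathShortcuts(p))
	}
	// Output:
	// "../../etc/passwd" -> "etc/passwd"
	// "a/../b" -> "b"
	// "/var/log" -> "var/log"
	// ".." -> ""
}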
@@ -404,6 +422,12 @@ func untarAll(reader io.Reader, destFile, prefix string) error { } entrySeq++ mode := header.FileInfo().Mode() + // all the files will start with the prefix, which is the directory where + // they were located on the pod, we need to strip down that prefix, but + // if the prefix is missing it means the tar was tampered with + if !strings.HasPrefix(header.Name, prefix) { + return fmt.Errorf("tar contents corrupted") + } outFileName := path.Join(destFile, clean(header.Name[len(prefix):])) baseName := path.Dir(outFileName) if err := os.MkdirAll(baseName, 0755); err != nil { @@ -428,8 +452,16 @@ func untarAll(reader io.Reader, destFile, prefix string) error { } if mode&os.ModeSymlink != 0 { - err := os.Symlink(header.Linkname, outFileName) - if err != nil { + linkname := header.Linkname + // error is returned if linkname can't be made relative to destFile, + // but relative can end up being ../dir that's why we also need to + // verify if relative path is the same after Clean-ing + relative, err := filepath.Rel(destFile, linkname) + if path.IsAbs(linkname) && (err != nil || relative != stripPathShortcuts(relative)) { + fmt.Fprintf(o.IOStreams.ErrOut, "warning: link %q is pointing to %q which is outside target destination, skipping\n", outFileName, header.Linkname) + continue + } + if err := os.Symlink(linkname, outFileName); err != nil { return err } } else { @@ -460,7 +492,7 @@ func getPrefix(file string) string { return strings.TrimLeft(file, "/") } -func (o *CopyOptions) execute(options *ExecOptions) error { +func (o *CopyOptions) execute(options *exec.ExecOptions) error { if len(options.Namespace) == 0 { options.Namespace = o.Namespace } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/BUILD.bazel index 0dc975b602f82..25a51571a4200 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/BUILD.bazel @@ -23,7 +23,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/create", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", @@ -37,16 +36,19 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library",
"//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/printers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create.go index 6e5fe28e1ffb3..1a426bae61611 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create.go @@ -24,8 +24,8 @@ import ( "runtime" "strings" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -33,15 +33,16 @@ import ( kruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/dynamic" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor" + "k8s.io/kubernetes/pkg/kubectl/generate" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) type CreateOptions struct { @@ -93,11 +94,11 @@ func NewCmdCreate(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cob o := NewCreateOptions(ioStreams) cmd := &cobra.Command{ - Use: "create -f FILENAME", + Use: "create -f FILENAME", DisableFlagsInUseLine: true, - Short: i18n.T("Create a resource from a file or from stdin."), - Long: createLong, - Example: createExample, + Short: i18n.T("Create a resource from a file or from stdin."), + Long: createLong, + Example: createExample, Run: func(cmd *cobra.Command, args []string) { if cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames) { defaultRunFunc := cmdutil.DefaultSubCommandRun(ioStreams.ErrOut) @@ -241,12 +242,12 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error { if err != nil { return err } - if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info.Object, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info.Object, scheme.DefaultJSONEncoder()); err != nil { return cmdutil.AddSourceToErr("creating", info.Source, err) } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } if !o.DryRun { @@ -346,7 +347,7 @@ type CreateSubcommandOptions struct { // Name of resource being created Name string // StructuredGenerator is the resource generator for the object being created - StructuredGenerator kubectl.StructuredGenerator + StructuredGenerator generate.StructuredGenerator // DryRun is true if the command should be simulated but not run against the server DryRun bool CreateAnnotation bool @@ -369,7 +370,7 @@ func NewCreateSubcommandOptions(ioStreams genericclioptions.IOStreams) *CreateSu } } -func (o *CreateSubcommandOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string, generator kubectl.StructuredGenerator) error { +func (o *CreateSubcommandOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string, generator 
generate.StructuredGenerator) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { return err @@ -428,7 +429,7 @@ func (o *CreateSubcommandOptions) Run() error { return err } - if err := kubectl.CreateOrUpdateAnnotation(o.CreateAnnotation, obj, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(o.CreateAnnotation, obj, scheme.DefaultJSONEncoder()); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_clusterrole.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_clusterrole.go index ccaaab0fab886..ce1700282758b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_clusterrole.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_clusterrole.go @@ -26,9 +26,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -71,11 +71,11 @@ func NewCmdCreateClusterRole(f cmdutil.Factory, ioStreams genericclioptions.IOSt AggregationRule: map[string]string{}, } cmd := &cobra.Command{ - Use: "clusterrole NAME --verb=verb --resource=resource.group [--resource-name=resourcename] [--dry-run]", + Use: "clusterrole NAME --verb=verb --resource=resource.group [--resource-name=resourcename] [--dry-run]", DisableFlagsInUseLine: true, - Short: clusterRoleLong, - Long: clusterRoleLong, - Example: clusterRoleExample, + Short: clusterRoleLong, + Long: clusterRoleLong, + Example: clusterRoleExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(c.Complete(f, cmd, args)) cmdutil.CheckErr(c.Validate()) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_clusterrolebinding.go index e345fb2065cd0..882da888cc3ce 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_clusterrolebinding.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_clusterrolebinding.go @@ -20,10 +20,11 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -46,11 +47,11 @@ func NewCmdCreateClusterRoleBinding(f cmdutil.Factory, ioStreams genericclioptio } cmd := &cobra.Command{ - Use: "clusterrolebinding NAME --clusterrole=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", + Use: "clusterrolebinding NAME --clusterrole=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", DisableFlagsInUseLine: true, - Short: i18n.T("Create a ClusterRoleBinding for a particular ClusterRole"), - Long: clusterRoleBindingLong, - Example: clusterRoleBindingExample, + Short: i18n.T("Create a ClusterRoleBinding for a particular ClusterRole"), + Long: clusterRoleBindingLong, + Example: clusterRoleBindingExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.Run()) @@ -61,12 
+62,12 @@ func NewCmdCreateClusterRoleBinding(f cmdutil.Factory, ioStreams genericclioptio cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.ClusterRoleBindingV1GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.ClusterRoleBindingV1GeneratorName) cmd.Flags().String("clusterrole", "", i18n.T("ClusterRole this ClusterRoleBinding should reference")) cmd.MarkFlagCustom("clusterrole", "__kubectl_get_resource_clusterrole") - cmd.Flags().StringArray("user", []string{}, "Usernames to bind to the role") - cmd.Flags().StringArray("group", []string{}, "Groups to bind to the role") - cmd.Flags().StringArray("serviceaccount", []string{}, "Service accounts to bind to the role, in the format :") + cmd.Flags().StringArray("user", []string{}, "Usernames to bind to the clusterrole") + cmd.Flags().StringArray("group", []string{}, "Groups to bind to the clusterrole") + cmd.Flags().StringArray("serviceaccount", []string{}, "Service accounts to bind to the clusterrole, in the format :") return cmd } @@ -76,10 +77,10 @@ func (o *ClusterRoleBindingOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.ClusterRoleBindingV1GeneratorName: - generator = &kubectl.ClusterRoleBindingGeneratorV1{ + case generateversioned.ClusterRoleBindingV1GeneratorName: + generator = &generateversioned.ClusterRoleBindingGeneratorV1{ Name: name, ClusterRole: cmdutil.GetFlagString(cmd, "clusterrole"), Users: cmdutil.GetFlagStringArray(cmd, "user"), diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_configmap.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_configmap.go index 2fa97b6e06a7d..4837d658f9e32 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_configmap.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_configmap.go @@ -20,10 +20,11 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -67,7 +68,7 @@ func NewCmdCreateConfigMap(f cmdutil.Factory, ioStreams genericclioptions.IOStre } cmd := &cobra.Command{ - Use: "configmap NAME [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]", + Use: "configmap NAME [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]", DisableFlagsInUseLine: true, Aliases: []string{"cm"}, Short: i18n.T("Create a configmap from a local file, directory or literal value"), @@ -83,7 +84,7 @@ func NewCmdCreateConfigMap(f cmdutil.Factory, ioStreams genericclioptions.IOStre cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.ConfigMapV1GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.ConfigMapV1GeneratorName) cmd.Flags().StringSlice("from-file", []string{}, "Key file can be specified using its file path, in which case file basename will be used as configmap key, or optionally with a key and file path, in which case the given key will be used. 
Specifying a directory will iterate each named file in the directory whose basename is a valid configmap key.") cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in configmap (i.e. mykey=somevalue)") cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a configmap (i.e. a Docker .env file).") @@ -97,10 +98,10 @@ func (o *ConfigMapOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.ConfigMapV1GeneratorName: - generator = &kubectl.ConfigMapGeneratorV1{ + case generateversioned.ConfigMapV1GeneratorName: + generator = &generateversioned.ConfigMapGeneratorV1{ Name: name, FileSources: cmdutil.GetFlagStringSlice(cmd, "from-file"), LiteralSources: cmdutil.GetFlagStringArray(cmd, "from-literal"), diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_deployment.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_deployment.go index 6f4da51a8ac14..1a069f73ff277 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_deployment.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_deployment.go @@ -20,10 +20,11 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -48,7 +49,7 @@ func NewCmdCreateDeployment(f cmdutil.Factory, ioStreams genericclioptions.IOStr } cmd := &cobra.Command{ - Use: "deployment NAME --image=image [--dry-run]", + Use: "deployment NAME --image=image [--dry-run]", DisableFlagsInUseLine: true, Aliases: []string{"deploy"}, Short: i18n.T("Create a deployment with the specified name."), @@ -77,30 +78,30 @@ func generatorFromName( generatorName string, imageNames []string, deploymentName string, -) (kubectl.StructuredGenerator, bool) { +) (generate.StructuredGenerator, bool) { switch generatorName { - case cmdutil.DeploymentBasicAppsV1GeneratorName: - generator := &kubectl.DeploymentBasicAppsGeneratorV1{ - BaseDeploymentGenerator: kubectl.BaseDeploymentGenerator{ + case generateversioned.DeploymentBasicAppsV1GeneratorName: + generator := &generateversioned.DeploymentBasicAppsGeneratorV1{ + BaseDeploymentGenerator: generateversioned.BaseDeploymentGenerator{ Name: deploymentName, Images: imageNames, }, } return generator, true - case cmdutil.DeploymentBasicAppsV1Beta1GeneratorName: - generator := &kubectl.DeploymentBasicAppsGeneratorV1Beta1{ - BaseDeploymentGenerator: kubectl.BaseDeploymentGenerator{ + case generateversioned.DeploymentBasicAppsV1Beta1GeneratorName: + generator := &generateversioned.DeploymentBasicAppsGeneratorV1Beta1{ + BaseDeploymentGenerator: generateversioned.BaseDeploymentGenerator{ Name: deploymentName, Images: imageNames, }, } return generator, true - case cmdutil.DeploymentBasicV1Beta1GeneratorName: - generator := &kubectl.DeploymentBasicGeneratorV1{ - BaseDeploymentGenerator: kubectl.BaseDeploymentGenerator{ + case generateversioned.DeploymentBasicV1Beta1GeneratorName: + generator := &generateversioned.DeploymentBasicGeneratorV1{ + 
BaseDeploymentGenerator: generateversioned.BaseDeploymentGenerator{ Name: deploymentName, Images: imageNames, }, @@ -125,8 +126,8 @@ func (o *DeploymentOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args [] generatorName := cmdutil.GetFlagString(cmd, "generator") if len(generatorName) == 0 { - generatorName = cmdutil.DeploymentBasicAppsV1GeneratorName - generatorNameTemp, err := cmdutil.FallbackGeneratorNameIfNecessary(generatorName, clientset.Discovery(), o.CreateSubcommandOptions.ErrOut) + generatorName = generateversioned.DeploymentBasicAppsV1GeneratorName + generatorNameTemp, err := generateversioned.FallbackGeneratorNameIfNecessary(generatorName, clientset.Discovery(), o.CreateSubcommandOptions.ErrOut) if err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_job.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_job.go index b0665ee27318a..a82348e1b8c71 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_job.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_job.go @@ -29,10 +29,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_namespace.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_namespace.go index 275f8a8f0c8fb..04ea190cf8ac4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_namespace.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_namespace.go @@ -20,10 +20,11 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -46,7 +47,7 @@ func NewCmdCreateNamespace(f cmdutil.Factory, ioStreams genericclioptions.IOStre } cmd := &cobra.Command{ - Use: "namespace NAME [--dry-run]", + Use: "namespace NAME [--dry-run]", DisableFlagsInUseLine: true, Aliases: []string{"ns"}, Short: i18n.T("Create a namespace with the specified name"), @@ -62,7 +63,7 @@ func NewCmdCreateNamespace(f cmdutil.Factory, ioStreams genericclioptions.IOStre cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.NamespaceV1GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.NamespaceV1GeneratorName) return cmd } @@ -73,10 +74,10 @@ func (o *NamespaceOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.NamespaceV1GeneratorName: - generator = &kubectl.NamespaceGeneratorV1{Name: name} + case generateversioned.NamespaceV1GeneratorName: + generator = &generateversioned.NamespaceGeneratorV1{Name: name} default: return errUnsupportedGenerator(cmd, generatorName) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_pdb.go 
b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_pdb.go index 9845cabc49caa..7a0f5b91af54a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_pdb.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_pdb.go @@ -20,10 +20,11 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -51,7 +52,7 @@ func NewCmdCreatePodDisruptionBudget(f cmdutil.Factory, ioStreams genericcliopti } cmd := &cobra.Command{ - Use: "poddisruptionbudget NAME --selector=SELECTOR --min-available=N [--dry-run]", + Use: "poddisruptionbudget NAME --selector=SELECTOR --min-available=N [--dry-run]", DisableFlagsInUseLine: true, Aliases: []string{"pdb"}, Short: i18n.T("Create a pod disruption budget with the specified name."), @@ -67,7 +68,7 @@ func NewCmdCreatePodDisruptionBudget(f cmdutil.Factory, ioStreams genericcliopti cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.PodDisruptionBudgetV2GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.PodDisruptionBudgetV2GeneratorName) cmd.Flags().String("min-available", "", i18n.T("The minimum number or percentage of available pods this budget requires.")) cmd.Flags().String("max-unavailable", "", i18n.T("The maximum number or percentage of unavailable pods this budget requires.")) @@ -81,16 +82,16 @@ func (o *PodDisruptionBudgetOpts) Complete(f cmdutil.Factory, cmd *cobra.Command return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.PodDisruptionBudgetV1GeneratorName: - generator = &kubectl.PodDisruptionBudgetV1Generator{ + case generateversioned.PodDisruptionBudgetV1GeneratorName: + generator = &generateversioned.PodDisruptionBudgetV1Generator{ Name: name, MinAvailable: cmdutil.GetFlagString(cmd, "min-available"), Selector: cmdutil.GetFlagString(cmd, "selector"), } - case cmdutil.PodDisruptionBudgetV2GeneratorName: - generator = &kubectl.PodDisruptionBudgetV2Generator{ + case generateversioned.PodDisruptionBudgetV2GeneratorName: + generator = &generateversioned.PodDisruptionBudgetV2Generator{ Name: name, MinAvailable: cmdutil.GetFlagString(cmd, "min-available"), MaxUnavailable: cmdutil.GetFlagString(cmd, "max-unavailable"), diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_priorityclass.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_priorityclass.go index 75c9dcfae3846..5cf7f52920453 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_priorityclass.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_priorityclass.go @@ -20,10 +20,11 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -49,7 +50,7 @@ func 
NewCmdCreatePriorityClass(f cmdutil.Factory, ioStreams genericclioptions.IO } cmd := &cobra.Command{ - Use: "priorityclass NAME --value=VALUE --global-default=BOOL [--dry-run]", + Use: "priorityclass NAME --value=VALUE --global-default=BOOL [--dry-run]", DisableFlagsInUseLine: true, Aliases: []string{"pc"}, Short: i18n.T("Create a priorityclass with the specified name."), @@ -65,7 +66,7 @@ func NewCmdCreatePriorityClass(f cmdutil.Factory, ioStreams genericclioptions.IO cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.PriorityClassV1Alpha1GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.PriorityClassV1Alpha1GeneratorName) cmd.Flags().Int32("value", 0, i18n.T("the value of this priority class.")) cmd.Flags().Bool("global-default", false, i18n.T("global-default specifies whether this PriorityClass should be considered as the default priority.")) @@ -79,10 +80,10 @@ func (o *PriorityClassOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.PriorityClassV1Alpha1GeneratorName: - generator = &kubectl.PriorityClassV1Generator{ + case generateversioned.PriorityClassV1Alpha1GeneratorName: + generator = &generateversioned.PriorityClassV1Generator{ Name: name, Value: cmdutil.GetFlagInt32(cmd, "value"), GlobalDefault: cmdutil.GetFlagBool(cmd, "global-default"), diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_quota.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_quota.go index 233ac906d0db4..95676a18303fb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_quota.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_quota.go @@ -20,10 +20,11 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -49,7 +50,7 @@ func NewCmdCreateQuota(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) } cmd := &cobra.Command{ - Use: "quota NAME [--hard=key1=value1,key2=value2] [--scopes=Scope1,Scope2] [--dry-run=bool]", + Use: "quota NAME [--hard=key1=value1,key2=value2] [--scopes=Scope1,Scope2] [--dry-run=bool]", DisableFlagsInUseLine: true, Aliases: []string{"resourcequota"}, Short: i18n.T("Create a quota with the specified name."), @@ -65,7 +66,7 @@ func NewCmdCreateQuota(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.ResourceQuotaV1GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.ResourceQuotaV1GeneratorName) cmd.Flags().String("hard", "", i18n.T("A comma-delimited set of resource=quantity pairs that define a hard limit.")) cmd.Flags().String("scopes", "", i18n.T("A comma-delimited set of quota scopes that must all match each object tracked by the quota.")) return cmd @@ -77,10 +78,10 @@ func (o *QuotaOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []strin return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch 
generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.ResourceQuotaV1GeneratorName: - generator = &kubectl.ResourceQuotaGeneratorV1{ + case generateversioned.ResourceQuotaV1GeneratorName: + generator = &generateversioned.ResourceQuotaGeneratorV1{ Name: name, Hard: cmdutil.GetFlagString(cmd, "hard"), Scopes: cmdutil.GetFlagString(cmd, "scopes"), diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_role.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_role.go index 42a5276b6944f..7124c509e80f0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_role.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_role.go @@ -30,10 +30,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/genericclioptions" clientgorbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -142,11 +142,11 @@ func NewCmdCreateRole(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) o := NewCreateRoleOptions(ioStreams) cmd := &cobra.Command{ - Use: "role NAME --verb=verb --resource=resource.group/subresource [--resource-name=resourcename] [--dry-run]", + Use: "role NAME --verb=verb --resource=resource.group/subresource [--resource-name=resourcename] [--dry-run]", DisableFlagsInUseLine: true, - Short: roleLong, - Long: roleLong, - Example: roleExample, + Short: roleLong, + Long: roleLong, + Example: roleExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_rolebinding.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_rolebinding.go index e6e6dfd4ad5fe..4b8f2b28ed6f2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_rolebinding.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_rolebinding.go @@ -20,10 +20,11 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -46,11 +47,11 @@ func NewCmdCreateRoleBinding(f cmdutil.Factory, ioStreams genericclioptions.IOSt } cmd := &cobra.Command{ - Use: "rolebinding NAME --clusterrole=NAME|--role=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", + Use: "rolebinding NAME --clusterrole=NAME|--role=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", DisableFlagsInUseLine: true, - Short: i18n.T("Create a RoleBinding for a particular Role or ClusterRole"), - Long: roleBindingLong, - Example: roleBindingExample, + Short: i18n.T("Create a RoleBinding for a particular Role or ClusterRole"), + Long: roleBindingLong, + Example: roleBindingExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.Run()) @@ -61,7 +62,7 @@ func NewCmdCreateRoleBinding(f cmdutil.Factory, ioStreams genericclioptions.IOSt 
cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.RoleBindingV1GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.RoleBindingV1GeneratorName) cmd.Flags().String("clusterrole", "", i18n.T("ClusterRole this RoleBinding should reference")) cmd.Flags().String("role", "", i18n.T("Role this RoleBinding should reference")) cmd.Flags().StringArray("user", []string{}, "Usernames to bind to the role") @@ -76,10 +77,10 @@ func (o *RoleBindingOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.RoleBindingV1GeneratorName: - generator = &kubectl.RoleBindingGeneratorV1{ + case generateversioned.RoleBindingV1GeneratorName: + generator = &generateversioned.RoleBindingGeneratorV1{ Name: name, ClusterRole: cmdutil.GetFlagString(cmd, "clusterrole"), Role: cmdutil.GetFlagString(cmd, "role"), diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_secret.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_secret.go index 793c7f4c1ff85..4512dffbda831 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_secret.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_secret.go @@ -20,10 +20,11 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // NewCmdCreateSecret groups subcommands to create various types of secrets @@ -83,11 +84,11 @@ func NewCmdCreateSecretGeneric(f cmdutil.Factory, ioStreams genericclioptions.IO } cmd := &cobra.Command{ - Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]", + Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]", DisableFlagsInUseLine: true, - Short: i18n.T("Create a secret from a local file, directory or literal value"), - Long: secretLong, - Example: secretExample, + Short: i18n.T("Create a secret from a local file, directory or literal value"), + Long: secretLong, + Example: secretExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.Run()) @@ -98,7 +99,7 @@ func NewCmdCreateSecretGeneric(f cmdutil.Factory, ioStreams genericclioptions.IO cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretV1GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.SecretV1GeneratorName) cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.") cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in secret (i.e. 
mykey=somevalue)") cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).") @@ -113,10 +114,10 @@ func (o *SecretGenericOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.SecretV1GeneratorName: - generator = &kubectl.SecretGeneratorV1{ + case generateversioned.SecretV1GeneratorName: + generator = &generateversioned.SecretGeneratorV1{ Name: name, Type: cmdutil.GetFlagString(cmd, "type"), FileSources: cmdutil.GetFlagStringSlice(cmd, "from-file"), @@ -168,11 +169,11 @@ func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, ioStreams genericcliopt } cmd := &cobra.Command{ - Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]", + Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]", DisableFlagsInUseLine: true, - Short: i18n.T("Create a secret for use with a Docker registry"), - Long: secretForDockerRegistryLong, - Example: secretForDockerRegistryExample, + Short: i18n.T("Create a secret for use with a Docker registry"), + Long: secretForDockerRegistryLong, + Example: secretForDockerRegistryExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.Run()) @@ -183,7 +184,7 @@ func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, ioStreams genericcliopt cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForDockerRegistryV1GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.SecretForDockerRegistryV1GeneratorName) cmd.Flags().String("docker-username", "", i18n.T("Username for Docker registry authentication")) cmd.MarkFlagRequired("docker-username") cmd.Flags().String("docker-password", "", i18n.T("Password for Docker registry authentication")) @@ -212,10 +213,10 @@ func (o *SecretDockerRegistryOpts) Complete(f cmdutil.Factory, cmd *cobra.Comman } } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.SecretForDockerRegistryV1GeneratorName: - generator = &kubectl.SecretForDockerRegistryGeneratorV1{ + case generateversioned.SecretForDockerRegistryV1GeneratorName: + generator = &generateversioned.SecretForDockerRegistryGeneratorV1{ Name: name, Username: cmdutil.GetFlagString(cmd, "docker-username"), Email: cmdutil.GetFlagString(cmd, "docker-email"), @@ -259,11 +260,11 @@ func NewCmdCreateSecretTLS(f cmdutil.Factory, ioStreams genericclioptions.IOStre } cmd := &cobra.Command{ - Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run]", + Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run]", DisableFlagsInUseLine: true, - Short: i18n.T("Create a TLS secret"), - Long: secretForTLSLong, - Example: secretForTLSExample, + Short: i18n.T("Create a TLS secret"), + Long: secretForTLSLong, + Example: secretForTLSExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.Run()) @@ -274,7 +275,7 @@ func 
NewCmdCreateSecretTLS(f cmdutil.Factory, ioStreams genericclioptions.IOStre cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForTLSV1GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.SecretForTLSV1GeneratorName) cmd.Flags().String("cert", "", i18n.T("Path to PEM encoded public key certificate.")) cmd.Flags().String("key", "", i18n.T("Path to private key associated with given certificate.")) cmd.Flags().Bool("append-hash", false, "Append a hash of the secret to its name.") @@ -293,10 +294,10 @@ func (o *SecretTLSOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s return cmdutil.UsageErrorf(cmd, "flag %s is required", requiredFlag) } } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.SecretForTLSV1GeneratorName: - generator = &kubectl.SecretForTLSGeneratorV1{ + case generateversioned.SecretForTLSV1GeneratorName: + generator = &generateversioned.SecretForTLSGeneratorV1{ Name: name, Key: cmdutil.GetFlagString(cmd, "key"), Cert: cmdutil.GetFlagString(cmd, "cert"), diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_service.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_service.go index fa8f631547282..ee34f1814f7de 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_service.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_service.go @@ -21,10 +21,11 @@ import ( "k8s.io/api/core/v1" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // NewCmdCreateService is a macro command to create a new service @@ -71,11 +72,11 @@ func NewCmdCreateServiceClusterIP(f cmdutil.Factory, ioStreams genericclioptions } cmd := &cobra.Command{ - Use: "clusterip NAME [--tcp=:] [--dry-run]", + Use: "clusterip NAME [--tcp=:] [--dry-run]", DisableFlagsInUseLine: true, - Short: i18n.T("Create a ClusterIP service."), - Long: serviceClusterIPLong, - Example: serviceClusterIPExample, + Short: i18n.T("Create a ClusterIP service."), + Long: serviceClusterIPLong, + Example: serviceClusterIPExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.Run()) @@ -86,7 +87,7 @@ func NewCmdCreateServiceClusterIP(f cmdutil.Factory, ioStreams genericclioptions cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.ServiceClusterIPGeneratorV1Name) + cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceClusterIPGeneratorV1Name) addPortFlags(cmd) cmd.Flags().String("clusterip", "", i18n.T("Assign your own ClusterIP or set to 'None' for a 'headless' service (no loadbalancing).")) return cmd @@ -102,10 +103,10 @@ func (o *ServiceClusterIPOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, a return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.ServiceClusterIPGeneratorV1Name: - generator = &kubectl.ServiceCommonGeneratorV1{ + case 
generateversioned.ServiceClusterIPGeneratorV1Name: + generator = &generateversioned.ServiceCommonGeneratorV1{ Name: name, TCP: cmdutil.GetFlagStringSlice(cmd, "tcp"), Type: v1.ServiceTypeClusterIP, @@ -143,11 +144,11 @@ func NewCmdCreateServiceNodePort(f cmdutil.Factory, ioStreams genericclioptions. } cmd := &cobra.Command{ - Use: "nodeport NAME [--tcp=port:targetPort] [--dry-run]", + Use: "nodeport NAME [--tcp=port:targetPort] [--dry-run]", DisableFlagsInUseLine: true, - Short: i18n.T("Create a NodePort service."), - Long: serviceNodePortLong, - Example: serviceNodePortExample, + Short: i18n.T("Create a NodePort service."), + Long: serviceNodePortLong, + Example: serviceNodePortExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.Run()) @@ -158,7 +159,7 @@ func NewCmdCreateServiceNodePort(f cmdutil.Factory, ioStreams genericclioptions. cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.ServiceNodePortGeneratorV1Name) + cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceNodePortGeneratorV1Name) cmd.Flags().Int("node-port", 0, "Port used to expose the service on each node in a cluster.") addPortFlags(cmd) return cmd @@ -170,10 +171,10 @@ func (o *ServiceNodePortOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, ar return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.ServiceNodePortGeneratorV1Name: - generator = &kubectl.ServiceCommonGeneratorV1{ + case generateversioned.ServiceNodePortGeneratorV1Name: + generator = &generateversioned.ServiceCommonGeneratorV1{ Name: name, TCP: cmdutil.GetFlagStringSlice(cmd, "tcp"), Type: v1.ServiceTypeNodePort, @@ -212,11 +213,11 @@ func NewCmdCreateServiceLoadBalancer(f cmdutil.Factory, ioStreams genericcliopti } cmd := &cobra.Command{ - Use: "loadbalancer NAME [--tcp=port:targetPort] [--dry-run]", + Use: "loadbalancer NAME [--tcp=port:targetPort] [--dry-run]", DisableFlagsInUseLine: true, - Short: i18n.T("Create a LoadBalancer service."), - Long: serviceLoadBalancerLong, - Example: serviceLoadBalancerExample, + Short: i18n.T("Create a LoadBalancer service."), + Long: serviceLoadBalancerLong, + Example: serviceLoadBalancerExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.Run()) @@ -227,7 +228,7 @@ func NewCmdCreateServiceLoadBalancer(f cmdutil.Factory, ioStreams genericcliopti cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.ServiceLoadBalancerGeneratorV1Name) + cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceLoadBalancerGeneratorV1Name) addPortFlags(cmd) return cmd } @@ -238,10 +239,10 @@ func (o *ServiceLoadBalancerOpts) Complete(f cmdutil.Factory, cmd *cobra.Command return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.ServiceLoadBalancerGeneratorV1Name: - generator = &kubectl.ServiceCommonGeneratorV1{ + case generateversioned.ServiceLoadBalancerGeneratorV1Name: + generator = &generateversioned.ServiceCommonGeneratorV1{ Name: name, TCP: cmdutil.GetFlagStringSlice(cmd, "tcp"), Type: v1.ServiceTypeLoadBalancer, @@ -283,11 +284,11 @@ func NewCmdCreateServiceExternalName(f cmdutil.Factory, 
ioStreams genericcliopti } cmd := &cobra.Command{ - Use: "externalname NAME --external-name external.name [--dry-run]", + Use: "externalname NAME --external-name external.name [--dry-run]", DisableFlagsInUseLine: true, - Short: i18n.T("Create an ExternalName service."), - Long: serviceExternalNameLong, - Example: serviceExternalNameExample, + Short: i18n.T("Create an ExternalName service."), + Long: serviceExternalNameLong, + Example: serviceExternalNameExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.Run()) @@ -298,7 +299,7 @@ func NewCmdCreateServiceExternalName(f cmdutil.Factory, ioStreams genericcliopti cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.ServiceExternalNameGeneratorV1Name) + cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceExternalNameGeneratorV1Name) addPortFlags(cmd) cmd.Flags().String("external-name", "", i18n.T("External name of service")) cmd.MarkFlagRequired("external-name") @@ -311,10 +312,10 @@ func (o *ServiceExternalNameOpts) Complete(f cmdutil.Factory, cmd *cobra.Command return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.ServiceExternalNameGeneratorV1Name: - generator = &kubectl.ServiceCommonGeneratorV1{ + case generateversioned.ServiceExternalNameGeneratorV1Name: + generator = &generateversioned.ServiceCommonGeneratorV1{ Name: name, Type: v1.ServiceTypeExternalName, ExternalName: cmdutil.GetFlagString(cmd, "external-name"), diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_serviceaccount.go index e001be9dc3f34..6909035610671 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_serviceaccount.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/create/create_serviceaccount.go @@ -20,10 +20,11 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -46,7 +47,7 @@ func NewCmdCreateServiceAccount(f cmdutil.Factory, ioStreams genericclioptions.I } cmd := &cobra.Command{ - Use: "serviceaccount NAME [--dry-run]", + Use: "serviceaccount NAME [--dry-run]", DisableFlagsInUseLine: true, Aliases: []string{"sa"}, Short: i18n.T("Create a service account with the specified name"), @@ -62,7 +63,7 @@ func NewCmdCreateServiceAccount(f cmdutil.Factory, ioStreams genericclioptions.I cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) - cmdutil.AddGeneratorFlags(cmd, cmdutil.ServiceAccountV1GeneratorName) + cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceAccountV1GeneratorName) return cmd } @@ -72,10 +73,10 @@ func (o *ServiceAccountOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, arg return err } - var generator kubectl.StructuredGenerator + var generator generate.StructuredGenerator switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { - case cmdutil.ServiceAccountV1GeneratorName: - generator = &kubectl.ServiceAccountGeneratorV1{Name: name} + case 
generateversioned.ServiceAccountV1GeneratorName: + generator = &generateversioned.ServiceAccountGeneratorV1{Name: name} default: return errUnsupportedGenerator(cmd, generatorName) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete/BUILD.bazel new file mode 100644 index 0000000000000..5344947b5d11c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete/BUILD.bazel @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "delete.go", + "delete_flags.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/delete", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/wait:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete/delete.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete/delete.go index 3f983f0d1b9b0..84b6db50bfe4b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete/delete.go @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package delete import ( "fmt" "strings" "time" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -32,10 +32,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/dynamic" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" kubectlwait "k8s.io/kubernetes/pkg/kubectl/cmd/wait" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -118,15 +118,15 @@ func NewCmdDelete(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra deleteFlags := NewDeleteCommandFlags("containing the resource to delete.") cmd := &cobra.Command{ - Use: "delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)])", + Use: "delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)])", DisableFlagsInUseLine: true, - Short: i18n.T("Delete resources by filenames, stdin, resources and names, or by resources and label selector"), - Long: delete_long, - Example: delete_example, + Short: i18n.T("Delete resources by filenames, stdin, resources and names, or by resources and label selector"), + Long: delete_long, + Example: delete_example, Run: func(cmd *cobra.Command, args []string) { o := deleteFlags.ToOptions(nil, streams) cmdutil.CheckErr(o.Complete(f, args, cmd)) - cmdutil.CheckErr(o.Validate(cmd)) + cmdutil.CheckErr(o.Validate()) cmdutil.CheckErr(o.RunDelete()) }, SuggestFor: []string{"rm"}, @@ -194,9 +194,9 @@ func (o *DeleteOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Co return nil } -func (o *DeleteOptions) Validate(cmd *cobra.Command) error { +func (o *DeleteOptions) Validate() error { if o.Output != "" && o.Output != "name" { - return cmdutil.UsageErrorf(cmd, "Unexpected -o output mode: %v. We only support '-o name'.", o.Output) + return fmt.Errorf("unexpected -o output mode: %v. We only support '-o name'.", o.Output) } if o.DeleteAll && len(o.LabelSelector) > 0 { @@ -206,11 +206,6 @@ func (o *DeleteOptions) Validate(cmd *cobra.Command) error { return fmt.Errorf("cannot set --all and --field-selector at the same time") } - if o.GracePeriod == 0 && !o.ForceDeletion && !o.WaitForDeletion { - // With the explicit --wait flag we need extra validation for backward compatibility - return fmt.Errorf("--grace-period=0 must have either --force specified, or --wait to be set to true") - } - switch { case o.GracePeriod == 0 && o.ForceDeletion: fmt.Fprintf(o.ErrOut, "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n") @@ -264,7 +259,7 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { responseMetadata, err := meta.Accessor(response) if err != nil { // we don't have UID, but we didn't fail the delete, next best thing is just skipping the UID - glog.V(1).Info(err) + klog.V(1).Info(err) return nil } uidMap[resourceLocation] = responseMetadata.GetUID() @@ -306,7 +301,7 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { if errors.IsForbidden(err) || errors.IsMethodNotSupported(err) { // if we're forbidden from waiting, we shouldn't fail. // if the resource doesn't support a verb we need, we shouldn't fail. 
- glog.V(1).Info(err) + klog.V(1).Info(err) return nil } return err diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete_flags.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete/delete_flags.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete_flags.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete/delete_flags.go index 94ae78e61e023..f6dcc74d95f9c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete_flags.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete/delete_flags.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package delete import ( "time" diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe/BUILD.bazel new file mode 100644 index 0000000000000..27e58a67734bd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["describe.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/describe", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/describe:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe/describe.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe/describe.go index 774f613b7fbf0..c1c84cd1717ca 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/describe/describe.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package describe import ( "fmt" @@ -28,10 +28,11 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/describe" + describeversioned "k8s.io/kubernetes/pkg/kubectl/describe/versioned" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -39,7 +40,7 @@ var ( Show details of a specific resource or group of resources Print a detailed description of the selected resources, including related resources such - as events or controllers. 
You may select a single object by name, all objects of that + as events or controllers. You may select a single object by name, all objects of that type, provide a name prefix, or label selector. For example: $ kubectl describe TYPE NAME_PREFIX @@ -73,7 +74,7 @@ type DescribeOptions struct { Selector string Namespace string - Describer func(*meta.RESTMapping) (printers.Describer, error) + Describer func(*meta.RESTMapping) (describe.Describer, error) NewBuilder func() *resource.Builder BuilderArgs []string @@ -82,7 +83,7 @@ type DescribeOptions struct { AllNamespaces bool IncludeUninitialized bool - DescriberSettings *printers.DescriberSettings + DescriberSettings *describe.DescriberSettings FilenameOptions *resource.FilenameOptions genericclioptions.IOStreams @@ -91,7 +92,7 @@ type DescribeOptions struct { func NewCmdDescribe(parent string, f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := &DescribeOptions{ FilenameOptions: &resource.FilenameOptions{}, - DescriberSettings: &printers.DescriberSettings{ + DescriberSettings: &describe.DescriberSettings{ ShowEvents: true, }, @@ -101,11 +102,11 @@ func NewCmdDescribe(parent string, f cmdutil.Factory, streams genericclioptions. } cmd := &cobra.Command{ - Use: "describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME)", + Use: "describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME)", DisableFlagsInUseLine: true, - Short: i18n.T("Show details of a specific resource or group of resources"), - Long: describeLong + "\n\n" + cmdutil.SuggestApiResources(parent), - Example: describeExample, + Short: i18n.T("Show details of a specific resource or group of resources"), + Long: describeLong + "\n\n" + cmdutil.SuggestApiResources(parent), + Example: describeExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Run()) @@ -137,8 +138,8 @@ func (o *DescribeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ o.BuilderArgs = args - o.Describer = func(mapping *meta.RESTMapping) (printers.Describer, error) { - return cmdutil.DescriberFn(f, mapping) + o.Describer = func(mapping *meta.RESTMapping) (describe.Describer, error) { + return describeversioned.DescriberFn(f, mapping) } o.NewBuilder = f.NewBuilder diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff/BUILD.bazel new file mode 100644 index 0000000000000..110fe1fc93b83 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["diff.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/diff", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/jonboulle/clockwork:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + 
"//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff/diff.go similarity index 54% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff/diff.go index 33a5dc64f815b..1956cf1bb0f18 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff/diff.go @@ -14,94 +14,66 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package diff import ( - "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" - "github.com/ghodss/yaml" + "github.com/jonboulle/clockwork" "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/client-go/dynamic" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/kubectl/apply/parse" - "k8s.io/kubernetes/pkg/kubectl/apply/strategy" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/cmd/apply" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/utils/exec" + "sigs.k8s.io/yaml" ) var ( diffLong = templates.LongDesc(i18n.T(` - Diff configurations specified by filename or stdin between their local, - last-applied, live and/or "merged" versions. - - LOCAL and LIVE versions are diffed by default. Other available keywords - are MERGED and LAST. + Diff configurations specified by filename or stdin between the current online + configuration, and the configuration as it would be if applied. Output is always YAML. - KUBERNETES_EXTERNAL_DIFF environment variable can be used to select your own + KUBECTL_EXTERNAL_DIFF environment variable can be used to select your own diff command. By default, the "diff" command available in your path will be run with "-u" (unicode) and "-N" (treat new files as empty) options.`)) diffExample = templates.Examples(i18n.T(` - # Diff resources included in pod.json. By default, it will diff LOCAL and LIVE versions - kubectl alpha diff -f pod.json - - # When one version is specified, diff that version against LIVE - cat service.yaml | kubectl alpha diff -f - MERGED + # Diff resources included in pod.json. 
+ kubectl diff -f pod.json - # Or specify both versions - kubectl alpha diff -f pod.json -f service.yaml LAST LOCAL`)) + # Diff file read from stdin + cat service.yaml | kubectl diff -f -`)) ) +// Number of times we try to diff before giving-up +const maxRetries = 4 + type DiffOptions struct { FilenameOptions resource.FilenameOptions } -func isValidArgument(arg string) error { - switch arg { - case "LOCAL", "LIVE", "LAST", "MERGED": - return nil - default: - return fmt.Errorf(`Invalid parameter %q, must be either "LOCAL", "LIVE", "LAST" or "MERGED"`, arg) - } - -} - -func parseDiffArguments(args []string) (string, string, error) { - if len(args) > 2 { - return "", "", fmt.Errorf("Invalid number of arguments: expected at most 2.") - } - // Default values - from := "LOCAL" - to := "LIVE" - if len(args) > 0 { - from = args[0] - } - if len(args) > 1 { - to = args[1] +func checkDiffArgs(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return cmdutil.UsageErrorf(cmd, "Unexpected args: %v", args) } - - if err := isValidArgument(to); err != nil { - return "", "", err - } - if err := isValidArgument(from); err != nil { - return "", "", err - } - - return from, to, nil + return nil } func NewCmdDiff(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { @@ -111,15 +83,14 @@ func NewCmdDiff(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C IOStreams: streams, } cmd := &cobra.Command{ - Use: "diff -f FILENAME", + Use: "diff -f FILENAME", DisableFlagsInUseLine: true, - Short: i18n.T("Diff different versions of configurations"), - Long: diffLong, - Example: diffExample, + Short: i18n.T("Diff live version against would-be applied version"), + Long: diffLong, + Example: diffExample, Run: func(cmd *cobra.Command, args []string) { - from, to, err := parseDiffArguments(args) - cmdutil.CheckErr(err) - cmdutil.CheckErr(RunDiff(f, &diff, &options, from, to)) + cmdutil.CheckErr(checkDiffArgs(cmd, args)) + cmdutil.CheckErr(RunDiff(f, &diff, &options)) }, } @@ -131,7 +102,7 @@ func NewCmdDiff(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C } // DiffProgram finds and run the diff program. The value of -// KUBERNETES_EXTERNAL_DIFF environment variable will be used a diff +// KUBECTL_EXTERNAL_DIFF environment variable will be used a diff // program. By default, `diff(1)` will be used. type DiffProgram struct { Exec exec.Interface @@ -140,7 +111,7 @@ type DiffProgram struct { func (d *DiffProgram) getCommand(args ...string) exec.Cmd { diff := "" - if envDiff := os.Getenv("KUBERNETES_EXTERNAL_DIFF"); envDiff != "" { + if envDiff := os.Getenv("KUBECTL_EXTERNAL_DIFF"); envDiff != "" { diff = envDiff } else { diff = "diff" @@ -156,15 +127,14 @@ func (d *DiffProgram) getCommand(args ...string) exec.Cmd { // Run runs the detected diff program. `from` and `to` are the directory to diff. func (d *DiffProgram) Run(from, to string) error { - d.getCommand(from, to).Run() // Ignore diff return code - return nil + return d.getCommand(from, to).Run() } // Printer is used to print an object. type Printer struct{} // Print the object inside the writer w. 
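// Rough sketch of the behaviour described in the hunk above for the renamed
// KUBECTL_EXTERNAL_DIFF variable: when set, it selects the diff program run
// against the two dumped directories; otherwise plain "diff -u -N" is used.
// The argument handling for an external program and the directory names are
// assumptions for illustration only.
package main

import (
	"os"
	"os/exec"
)

func runExternalDiff(from, to string) error {
	prog := "diff"
	args := []string{"-u", "-N", from, to}
	if env := os.Getenv("KUBECTL_EXTERNAL_DIFF"); env != "" {
		prog = env
		args = []string{from, to} // assumption: an external program only gets the two paths
	}
	cmd := exec.Command(prog, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	_ = runExternalDiff("/tmp/LIVE-example", "/tmp/MERGED-example")
}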
-func (p *Printer) Print(obj map[string]interface{}, w io.Writer) error { +func (p *Printer) Print(obj runtime.Object, w io.Writer) error { if obj == nil { return nil } @@ -195,16 +165,12 @@ func NewDiffVersion(name string) (*DiffVersion, error) { }, nil } -func (v *DiffVersion) getObject(obj Object) (map[string]interface{}, error) { +func (v *DiffVersion) getObject(obj Object) (runtime.Object, error) { switch v.Name { case "LIVE": - return obj.Live() + return obj.Live(), nil case "MERGED": return obj.Merged() - case "LOCAL": - return obj.Local() - case "LAST": - return obj.Last() } return nil, fmt.Errorf("Unknown version: %v", v.Name) } @@ -254,10 +220,8 @@ func (d *Directory) Delete() error { // Object is an interface that let's you retrieve multiple version of // it. type Object interface { - Local() (map[string]interface{}, error) - Live() (map[string]interface{}, error) - Last() (map[string]interface{}, error) - Merged() (map[string]interface{}, error) + Live() runtime.Object + Merged() (runtime.Object, error) Name() string } @@ -265,84 +229,77 @@ type Object interface { // InfoObject is an implementation of the Object interface. It gets all // the information from the Info object. type InfoObject struct { - Remote *unstructured.Unstructured - Info *resource.Info - Encoder runtime.Encoder - Parser *parse.Factory + LocalObj runtime.Object + Info *resource.Info + Encoder runtime.Encoder + OpenAPI openapi.Resources + Force bool } var _ Object = &InfoObject{} -func (obj InfoObject) toMap(data []byte) (map[string]interface{}, error) { - m := map[string]interface{}{} - if len(data) == 0 { - return m, nil - } - err := json.Unmarshal(data, &m) - return m, err -} - -func (obj InfoObject) Local() (map[string]interface{}, error) { - data, err := runtime.Encode(obj.Encoder, obj.Info.Object) - if err != nil { - return nil, err - } - return obj.toMap(data) -} - -func (obj InfoObject) Live() (map[string]interface{}, error) { - if obj.Remote == nil { - return nil, nil // Object doesn't exist on cluster. - } - return obj.Remote.UnstructuredContent(), nil +// Returns the live version of the object +func (obj InfoObject) Live() runtime.Object { + return obj.Info.Object } -func (obj InfoObject) Merged() (map[string]interface{}, error) { - local, err := obj.Local() - if err != nil { - return nil, err +// Returns the "merged" object, as it would look like if applied or +// created. +func (obj InfoObject) Merged() (runtime.Object, error) { + // Build the patcher, and then apply the patch with dry-run, unless the object doesn't exist, in which case we need to create it. + if obj.Live() == nil { + // Dry-run create if the object doesn't exist. + return resource.NewHelper(obj.Info.Client, obj.Info.Mapping).Create( + obj.Info.Namespace, + true, + obj.LocalObj, + &metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}, + ) } - live, err := obj.Live() - if err != nil { - return nil, err - } - - last, err := obj.Last() - if err != nil { - return nil, err - } - - if live == nil || last == nil { - return local, nil // We probably don't have a live version, merged is local. 
+ var resourceVersion *string + if !obj.Force { + accessor, err := meta.Accessor(obj.Info.Object) + if err != nil { + return nil, err + } + str := accessor.GetResourceVersion() + resourceVersion = &str } - elmt, err := obj.Parser.CreateElement(last, local, live) + modified, err := kubectl.GetModifiedConfiguration(obj.LocalObj, false, unstructured.UnstructuredJSONScheme) if err != nil { return nil, err } - result, err := elmt.Merge(strategy.Create(strategy.Options{})) - return result.MergedResult.(map[string]interface{}), err -} -func (obj InfoObject) Last() (map[string]interface{}, error) { - if obj.Remote == nil { - return nil, nil // No object is live, return empty - } - accessor, err := meta.Accessor(obj.Remote) - if err != nil { - return nil, err - } - annots := accessor.GetAnnotations() - if annots == nil { - return nil, nil // Not an error, just empty. + // This is using the patcher from apply, to keep the same behavior. + // We plan on replacing this with server-side apply when it becomes available. + patcher := &apply.Patcher{ + Mapping: obj.Info.Mapping, + Helper: resource.NewHelper(obj.Info.Client, obj.Info.Mapping), + Overwrite: true, + BackOff: clockwork.NewRealClock(), + ServerDryRun: true, + OpenapiSchema: obj.OpenAPI, + ResourceVersion: resourceVersion, } - return obj.toMap([]byte(annots[api.LastAppliedConfigAnnotation])) + _, result, err := patcher.Patch(obj.Info.Object, modified, obj.Info.Source, obj.Info.Namespace, obj.Info.Name, nil) + return result, err } func (obj InfoObject) Name() string { - return obj.Info.Name + group := "" + if obj.Info.Mapping.GroupVersionKind.Group != "" { + group = fmt.Sprintf("%v.", obj.Info.Mapping.GroupVersionKind.Group) + } + return group + fmt.Sprintf( + "%v.%v.%v.%v", + obj.Info.Mapping.GroupVersionKind.Version, + obj.Info.Mapping.GroupVersionKind.Kind, + obj.Info.Namespace, + obj.Info.Name, + ) } // Differ creates two DiffVersion and diffs them. @@ -389,61 +346,35 @@ func (d *Differ) TearDown() { d.To.Dir.Delete() // Ignore error } -type Downloader struct { - mapper meta.RESTMapper - dclient dynamic.Interface - ns string +func isConflict(err error) bool { + return err != nil && errors.IsConflict(err) } -func NewDownloader(f cmdutil.Factory) (*Downloader, error) { - var err error - var d Downloader - - d.mapper, err = f.ToRESTMapper() - if err != nil { - return nil, err - } - d.dclient, err = f.DynamicClient() +// RunDiff uses the factory to parse file arguments, find the version to +// diff, and find each Info object for each files, and runs against the +// differ. 
+func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions) error { + schema, err := f.OpenAPISchema() if err != nil { - return nil, err + return err } - d.ns, _, _ = f.ToRawKubeConfigLoader().Namespace() - return &d, nil -} - -func (d *Downloader) Download(info *resource.Info) (*unstructured.Unstructured, error) { - gvk := info.Object.GetObjectKind().GroupVersionKind() - mapping, err := d.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + discovery, err := f.ToDiscoveryClient() if err != nil { - return nil, err - } - - var resource dynamic.ResourceInterface - switch mapping.Scope.Name() { - case meta.RESTScopeNameNamespace: - if info.Namespace == "" { - info.Namespace = d.ns - } - resource = d.dclient.Resource(mapping.Resource).Namespace(info.Namespace) - case meta.RESTScopeNameRoot: - resource = d.dclient.Resource(mapping.Resource) + return err } - return resource.Get(info.Name, metav1.GetOptions{}) -} - -// RunDiff uses the factory to parse file arguments, find the version to -// diff, and find each Info object for each files, and runs against the -// differ. -func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions, from, to string) error { - openapi, err := f.OpenAPISchema() + dynamic, err := f.DynamicClient() if err != nil { return err } - parser := &parse.Factory{Resources: openapi} - differ, err := NewDiffer(from, to) + dryRunVerifier := &apply.DryRunVerifier{ + Finder: cmdutil.NewCRDFinder(cmdutil.CRDFromDynamic(dynamic)), + OpenAPIGetter: discovery, + } + + differ, err := NewDiffer("LIVE", "MERGED") if err != nil { return err } @@ -460,38 +391,56 @@ func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions, from, t Unstructured(). NamespaceParam(cmdNamespace).DefaultNamespace(). FilenameParam(enforceNamespace, &options.FilenameOptions). - Local(). Flatten(). 
Do() if err := r.Err(); err != nil { return err } - dl, err := NewDownloader(f) - if err != nil { - return err - } - err = r.Visit(func(info *resource.Info, err error) error { if err != nil { return err } - remote, _ := dl.Download(info) - obj := InfoObject{ - Remote: remote, - Info: info, - Parser: parser, - Encoder: cmdutil.InternalVersionJSONEncoder(), + if err := dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + return err } - return differ.Diff(obj, printer) + local := info.Object.DeepCopyObject() + for i := 1; i <= maxRetries; i++ { + if err = info.Get(); err != nil { + if !errors.IsNotFound(err) { + return err + } + info.Object = nil + } + + force := i == maxRetries + if force { + klog.Warningf( + "Object (%v: %v) keeps changing, diffing without lock", + info.Object.GetObjectKind().GroupVersionKind(), + info.Name, + ) + } + obj := InfoObject{ + LocalObj: local, + Info: info, + Encoder: scheme.DefaultJSONEncoder(), + OpenAPI: schema, + Force: force, + } + + err = differ.Diff(obj, printer) + if !isConflict(err) { + break + } + } + return err }) if err != nil { return err } - differ.Run(diff) - - return nil + return differ.Run(diff) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain/BUILD.bazel new file mode 100644 index 0000000000000..1dd074d409e79 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["drain.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/drain", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain/drain.go similarity index 92% rename from 
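// Minimal sketch of the retry pattern introduced in RunDiff above: retry the
// per-object diff on a conflict error up to maxRetries, forcing the final
// attempt so an object whose resourceVersion keeps changing still gets
// diffed. The doDiff callback is hypothetical; the real code builds an
// InfoObject and calls differ.Diff.
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

const maxRetries = 4

func diffWithRetry(doDiff func(force bool) error) error {
	var err error
	for i := 1; i <= maxRetries; i++ {
		force := i == maxRetries // last attempt diffs without the resourceVersion lock
		if err = doDiff(force); !apierrors.IsConflict(err) {
			break
		}
	}
	return err
}

func main() {
	err := diffWithRetry(func(force bool) error {
		fmt.Println("diffing, force =", force)
		return nil
	})
	fmt.Println("result:", err)
}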
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain/drain.go index 578f850c2db33..d24ab8ac6bbb1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain/drain.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package drain import ( "errors" @@ -23,7 +23,6 @@ import ( "strings" "time" - "github.com/jonboulle/clockwork" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" @@ -32,7 +31,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/json" @@ -46,11 +44,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) type DrainOptions struct { @@ -65,12 +62,10 @@ type DrainOptions struct { GracePeriodSeconds int IgnoreDaemonsets bool Timeout time.Duration - backOff clockwork.Clock DeleteLocalData bool Selector string PodSelector string nodeInfos []*resource.Info - typer runtime.ObjectTyper genericclioptions.IOStreams } @@ -114,11 +109,11 @@ func NewCmdCordon(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cob } cmd := &cobra.Command{ - Use: "cordon NODE", + Use: "cordon NODE", DisableFlagsInUseLine: true, - Short: i18n.T("Mark node as unschedulable"), - Long: cordon_long, - Example: cordon_example, + Short: i18n.T("Mark node as unschedulable"), + Long: cordon_long, + Example: cordon_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.RunCordonOrUncordon(true)) @@ -145,11 +140,11 @@ func NewCmdUncordon(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *c } cmd := &cobra.Command{ - Use: "uncordon NODE", + Use: "uncordon NODE", DisableFlagsInUseLine: true, - Short: i18n.T("Mark node as schedulable"), - Long: uncordon_long, - Example: uncordon_example, + Short: i18n.T("Mark node as schedulable"), + Long: uncordon_long, + Example: uncordon_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.RunCordonOrUncordon(false)) @@ -165,9 +160,9 @@ var ( Drain node in preparation for maintenance. The given node will be marked unschedulable to prevent new pods from arriving. - 'drain' evicts the pods if the APIServer supports eviction - (http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use normal DELETE - to delete the pods. + 'drain' evicts the pods if the APIServer supports + [eviction](http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use normal + DELETE to delete the pods. The 'drain' evicts or deletes all pods except mirror pods (which cannot be deleted through the API server). 
If there are DaemonSet-managed pods, drain will not proceed without --ignore-daemonsets, and regardless it will not delete any @@ -199,7 +194,6 @@ func NewDrainOptions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) * PrintFlags: genericclioptions.NewPrintFlags("drained").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, - backOff: clockwork.NewRealClock(), GracePeriodSeconds: -1, } } @@ -208,11 +202,11 @@ func NewCmdDrain(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr options := NewDrainOptions(f, ioStreams) cmd := &cobra.Command{ - Use: "drain NODE", + Use: "drain NODE", DisableFlagsInUseLine: true, - Short: i18n.T("Drain node in preparation for maintenance"), - Long: drain_long, - Example: drain_example, + Short: i18n.T("Drain node in preparation for maintenance"), + Long: drain_long, + Example: drain_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(f, cmd, args)) cmdutil.CheckErr(options.RunDrain()) @@ -241,9 +235,6 @@ func (o *DrainOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st if len(args) > 0 && len(o.Selector) > 0 { return cmdutil.UsageErrorf(cmd, "error: cannot specify both a node name and a --selector option") } - if len(args) > 0 && len(args) != 1 { - return cmdutil.UsageErrorf(cmd, fmt.Sprintf("USAGE: %s [flags]", cmd.Use)) - } o.DryRun = cmdutil.GetDryRunFlag(cmd) @@ -284,7 +275,7 @@ func (o *DrainOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st } builder := f.NewBuilder(). - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). NamespaceParam(o.Namespace).DefaultNamespace(). ResourceNames("nodes", args...). SingleResourceType(). @@ -732,63 +723,63 @@ func (o *DrainOptions) RunCordonOrUncordon(desired bool) error { for _, nodeInfo := range o.nodeInfos { if nodeInfo.Mapping.GroupVersionKind.Kind == "Node" { - obj, err := legacyscheme.Scheme.ConvertToVersion(nodeInfo.Object, nodeInfo.Mapping.GroupVersionKind.GroupVersion()) + obj, err := scheme.Scheme.ConvertToVersion(nodeInfo.Object, nodeInfo.Mapping.GroupVersionKind.GroupVersion()) if err != nil { - fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err) + fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: %v\n", cordonOrUncordon, nodeInfo.Name, err) continue } oldData, err := json.Marshal(obj) if err != nil { - fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err) + fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: %v\n", cordonOrUncordon, nodeInfo.Name, err) continue } node, ok := obj.(*corev1.Node) if !ok { - fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: unexpected Type%T, expected Node", cordonOrUncordon, nodeInfo.Name, obj) + fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: unexpected Type%T, expected Node\n", cordonOrUncordon, nodeInfo.Name, obj) continue } unsched := node.Spec.Unschedulable if unsched == desired { printObj, err := o.ToPrinter(already(desired)) if err != nil { - fmt.Printf("error: %v", err) + fmt.Fprintf(o.ErrOut, "error: %v\n", err) continue } - printObj(cmdutil.AsDefaultVersionedOrOriginal(nodeInfo.Object, nodeInfo.Mapping), o.Out) + printObj(nodeInfo.Object, o.Out) } else { if !o.DryRun { helper := resource.NewHelper(o.restClient, nodeInfo.Mapping) node.Spec.Unschedulable = desired newData, err := json.Marshal(obj) if err != nil { - fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err) + fmt.Fprintf(o.ErrOut, "error: unable to %s 
node %q: %v\n", cordonOrUncordon, nodeInfo.Name, err) continue } patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj) if err != nil { - fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err) + fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: %v\n", cordonOrUncordon, nodeInfo.Name, err) continue } _, err = helper.Patch(o.Namespace, nodeInfo.Name, types.StrategicMergePatchType, patchBytes, nil) if err != nil { - fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err) + fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: %v\n", cordonOrUncordon, nodeInfo.Name, err) continue } } printObj, err := o.ToPrinter(changed(desired)) if err != nil { - fmt.Fprintf(o.ErrOut, "%v", err) + fmt.Fprintf(o.ErrOut, "%v\n", err) continue } - printObj(cmdutil.AsDefaultVersionedOrOriginal(nodeInfo.Object, nodeInfo.Mapping), o.Out) + printObj(nodeInfo.Object, o.Out) } } else { printObj, err := o.ToPrinter("skipped") if err != nil { - fmt.Fprintf(o.ErrOut, "%v", err) + fmt.Fprintf(o.ErrOut, "%v\n", err) continue } - printObj(cmdutil.AsDefaultVersionedOrOriginal(nodeInfo.Object, nodeInfo.Mapping), o.Out) + printObj(nodeInfo.Object, o.Out) } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit/BUILD.bazel new file mode 100644 index 0000000000000..3061c86e94566 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["edit.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/edit", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit/edit.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit/edit.go index 65d9cf0c4a265..ab121f41ed5e3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/edit/edit.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package edit import ( "fmt" @@ -22,10 +22,10 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -72,11 +72,11 @@ func NewCmdEdit(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra o.ValidateOptions = cmdutil.ValidateOptions{EnableValidation: true} cmd := &cobra.Command{ - Use: "edit (RESOURCE/NAME | -f FILENAME)", + Use: "edit (RESOURCE/NAME | -f FILENAME)", DisableFlagsInUseLine: true, - Short: i18n.T("Edit a resource on the server"), - Long: editLong, - Example: fmt.Sprintf(editExample), + Short: i18n.T("Edit a resource on the server"), + Long: editLong, + Example: fmt.Sprintf(editExample), Run: func(cmd *cobra.Command, args []string) { if err := o.Complete(f, args, cmd); err != nil { cmdutil.CheckErr(err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec/BUILD.bazel new file mode 100644 index 0000000000000..8e722c512d27f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["exec.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/exec", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/term:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/util/interrupt:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec/exec.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec/exec.go index 9e17037557a0f..2cf2aa57bb6cc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec/exec.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package exec import ( "fmt" @@ -30,11 +30,10 @@ import ( coreclient "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" - "k8s.io/kubernetes/pkg/api/legacyscheme" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/kubernetes/pkg/kubectl/util/term" "k8s.io/kubernetes/pkg/util/interrupt" ) @@ -73,11 +72,11 @@ func NewCmdExec(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C Executor: &DefaultRemoteExecutor{}, } cmd := &cobra.Command{ - Use: "exec POD [-c CONTAINER] -- COMMAND [args...]", + Use: "exec POD [-c CONTAINER] -- COMMAND [args...]", DisableFlagsInUseLine: true, - Short: i18n.T("Execute a command in a container"), - Long: "Execute a command in a container.", - Example: exec_example, + Short: i18n.T("Execute a command in a container"), + Long: "Execute a command in a container.", + Example: exec_example, Run: func(cmd *cobra.Command, args []string) { argsLenAtDash := cmd.ArgsLenAtDash() cmdutil.CheckErr(options.Complete(f, cmd, args, argsLenAtDash)) @@ -213,7 +212,7 @@ func (p *ExecOptions) Validate() error { return nil } -func (o *StreamOptions) setupTTY() term.TTY { +func (o *StreamOptions) SetupTTY() term.TTY { t := term.TTY{ Parent: o.InterruptParent, Out: o.Out, @@ -289,7 +288,7 @@ func (p *ExecOptions) Run() error { } // ensure we can recover the terminal while attached - t := p.setupTTY() + t := p.SetupTTY() var sizeQueue remotecommand.TerminalSizeQueue if t.Raw { @@ -314,14 +313,14 @@ func (p *ExecOptions) Run() error { Namespace(pod.Namespace). SubResource("exec"). 
Param("container", containerName) - req.VersionedParams(&api.PodExecOptions{ + req.VersionedParams(&corev1.PodExecOptions{ Container: containerName, Command: p.Command, Stdin: p.Stdin, Stdout: p.Out != nil, Stderr: p.ErrOut != nil, TTY: t.Raw, - }, legacyscheme.ParameterCodec) + }, scheme.ParameterCodec) return p.Executor.Execute("POST", req.URL(), p.Config, p.In, p.Out, p.ErrOut, t.Raw, sizeQueue) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain/BUILD.bazel new file mode 100644 index 0000000000000..d593968a714ad --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["explain.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/explain", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/explain:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain/explain.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain/explain.go index d76bef0868d88..73f6289fb4c0f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/explain/explain.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package explain import ( "fmt" @@ -24,11 +24,11 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" "k8s.io/kubernetes/pkg/kubectl/explain" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -74,11 +74,11 @@ func NewCmdExplain(parent string, f cmdutil.Factory, streams genericclioptions.I o := NewExplainOptions(parent, streams) cmd := &cobra.Command{ - Use: "explain RESOURCE", + Use: "explain RESOURCE", DisableFlagsInUseLine: true, - Short: i18n.T("Documentation of resources"), - Long: explainLong + "\n\n" + cmdutil.SuggestApiResources(parent), - Example: explainExamples, + Short: i18n.T("Documentation of resources"), + Long: explainLong + "\n\n" + cmdutil.SuggestApiResources(parent), + Example: explainExamples, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd)) cmdutil.CheckErr(o.Validate(args)) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose/BUILD.bazel new file mode 100644 index 0000000000000..4a0c2c36663b9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["expose.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/expose", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose/expose.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose/expose.go index 6a9222838fda5..2727e61c1bfc1 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/expose/expose.go @@ -14,14 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package expose import ( "regexp" "strings" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,11 +34,13 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/dynamic" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -90,7 +92,7 @@ type ExposeServiceOptions struct { DryRun bool EnforceNamespace bool - Generators func(string) map[string]kubectl.Generator + Generators func(string) map[string]generate.Generator CanBeExposed polymorphichelpers.CanBeExposedFunc MapBasedSelectorForObject func(runtime.Object) (string, error) PortsForObject polymorphichelpers.PortsForObjectFunc @@ -126,11 +128,11 @@ func NewCmdExposeService(f cmdutil.Factory, streams genericclioptions.IOStreams) } cmd := &cobra.Command{ - Use: "expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP|SCTP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type]", + Use: "expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP|SCTP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type]", DisableFlagsInUseLine: true, - Short: i18n.T("Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service"), - Long: exposeLong, - Example: exposeExample, + Short: i18n.T("Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service"), + Long: exposeLong, + Example: exposeExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd)) cmdutil.CheckErr(o.RunExpose(cmd, args)) @@ -187,7 +189,7 @@ func (o *ExposeServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) e return err } - o.Generators = cmdutil.GeneratorFn + o.Generators = generateversioned.GeneratorFn o.Builder = f.NewBuilder() o.CanBeExposed = polymorphichelpers.CanBeExposedFn o.MapBasedSelectorForObject = polymorphichelpers.MapBasedSelectorForObjectFn @@ -240,7 +242,7 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro return err } - params := kubectl.MakeParams(cmd, names) + params := generate.MakeParams(cmd, names) name := info.Name if len(name) > validation.DNS1035LabelMaxLength { name = name[:validation.DNS1035LabelMaxLength] @@ -249,7 +251,7 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro // For objects that need a pod selector, derive it from the exposed object in case a user // didn't explicitly specify one via --selector - if s, found := params["selector"]; found && kubectl.IsZero(s) { + if s, found := params["selector"]; found && generate.IsZero(s) { s, err := o.MapBasedSelectorForObject(info.Object) if err != nil { return cmdutil.UsageErrorf(cmd, "couldn't retrieve selectors via --selector flag or introspection: %v", 
err) @@ -261,7 +263,7 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro // For objects that need a port, derive it from the exposed object in case a user // didn't explicitly specify one via --port - if port, found := params["port"]; found && kubectl.IsZero(port) { + if port, found := params["port"]; found && generate.IsZero(port) { ports, err := o.PortsForObject(info.Object) if err != nil { return cmdutil.UsageErrorf(cmd, "couldn't find port via --port flag or introspection: %v", err) @@ -285,23 +287,23 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro if err != nil { return cmdutil.UsageErrorf(cmd, "couldn't find protocol via introspection: %v", err) } - if protocols := kubectl.MakeProtocols(protocolsMap); !kubectl.IsZero(protocols) { + if protocols := generate.MakeProtocols(protocolsMap); !generate.IsZero(protocols) { params["protocols"] = protocols } } - if kubectl.IsZero(params["labels"]) { + if generate.IsZero(params["labels"]) { labels, err := meta.NewAccessor().Labels(info.Object) if err != nil { return err } - params["labels"] = kubectl.MakeLabels(labels) + params["labels"] = generate.MakeLabels(labels) } - if err = kubectl.ValidateParams(names, params); err != nil { + if err = generate.ValidateParams(names, params); err != nil { return err } // Check for invalid flags used against the present generator. - if err := kubectl.EnsureFlagsValid(cmd, generators, generatorName); err != nil { + if err := generate.EnsureFlagsValid(cmd, generators, generatorName); err != nil { return err } @@ -319,13 +321,13 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro } if err := o.Recorder.Record(object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } if o.DryRun { return o.PrintObj(object, o.Out) } - if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), object, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), object, scheme.DefaultJSONEncoder()); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/BUILD.bazel index 341364b462c44..7a3fc265b1cc0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/BUILD.bazel @@ -3,16 +3,19 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "customcolumn.go", + "customcolumn_flags.go", "get.go", "get_flags.go", "humanreadable_flags.go", + "sorter.go", ], importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get", importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/get", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -24,19 +27,23 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", 
+ "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/watch:go_default_library", + "//vendor/k8s.io/client-go/util/integer:go_default_library", + "//vendor/k8s.io/client-go/util/jsonpath:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/printers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", "//vendor/k8s.io/kubernetes/pkg/printers:go_default_library", "//vendor/k8s.io/kubernetes/pkg/printers/internalversion:go_default_library", "//vendor/k8s.io/kubernetes/pkg/util/interrupt:go_default_library", + "//vendor/vbom.ml/util/sortorder:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/printers/customcolumn.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/customcolumn.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/printers/customcolumn.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/customcolumn.go index b0ab6bc239cbd..576d4995af4a4 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/customcolumn.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/customcolumn.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package printers +package get import ( "bufio" @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/client-go/util/jsonpath" + utilprinters "k8s.io/kubernetes/pkg/kubectl/util/printers" ) var jsonRegexp = regexp.MustCompile("^\\{\\.?([^{}]+)\\}$|^\\.?([^{}]+)$") @@ -160,7 +161,7 @@ func (s *CustomColumnsPrinter) PrintObj(obj runtime.Object, out io.Writer) error } if w, found := out.(*tabwriter.Writer); !found { - w = GetNewTabWriter(out) + w = utilprinters.GetNewTabWriter(out) out = w defer w.Flush() } diff --git a/vendor/k8s.io/kubernetes/pkg/printers/customcolumn_flags.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/customcolumn_flags.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/printers/customcolumn_flags.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/customcolumn_flags.go index 599d91c2728f0..b8f2f4002b240 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/customcolumn_flags.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/customcolumn_flags.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package printers +package get import ( "fmt" @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/scheme" ) @@ -51,7 +52,7 @@ func (f *CustomColumnsPrintFlags) AllowedFormats() []string { // handling custom-column printing. // Returns false if the specified templateFormat does not match a supported format. // Supported format types can be found in pkg/printers/printers.go -func (f *CustomColumnsPrintFlags) ToPrinter(templateFormat string) (ResourcePrinter, error) { +func (f *CustomColumnsPrintFlags) ToPrinter(templateFormat string) (printers.ResourcePrinter, error) { if len(templateFormat) == 0 { return nil, genericclioptions.NoCompatiblePrinterError{} } @@ -79,7 +80,8 @@ func (f *CustomColumnsPrintFlags) ToPrinter(templateFormat string) (ResourcePrin return nil, fmt.Errorf("custom-columns format specified but no custom columns given") } - decoder := scheme.Codecs.UniversalDecoder() + // UniversalDecoder call must specify parameter versions; otherwise it will decode to internal versions. + decoder := scheme.Codecs.UniversalDecoder(scheme.Scheme.PrioritizedVersionsAllGroups()...) if templateFormat == "custom-columns-file" { file, err := os.Open(templateValue) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get.go index 185dac34385d0..5708a67751711 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get.go @@ -23,9 +23,10 @@ import ( "io" "net/url" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" + corev1 "k8s.io/api/core/v1" kapierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,16 +38,15 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/rest" watchtools "k8s.io/client-go/tools/watch" "k8s.io/kubernetes/pkg/api/legacyscheme" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" + utilprinters "k8s.io/kubernetes/pkg/kubectl/util/printers" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/kubernetes/pkg/util/interrupt" ) @@ -150,11 +150,11 @@ func NewCmdGet(parent string, f cmdutil.Factory, streams genericclioptions.IOStr o := NewGetOptions(parent, streams) cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|wide|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE[.VERSION][.GROUP] [NAME | -l label] | TYPE[.VERSION][.GROUP]/NAME ...) [flags]", + Use: "get [(-o|--output=)json|yaml|wide|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE[.VERSION][.GROUP] [NAME | -l label] | TYPE[.VERSION][.GROUP]/NAME ...) 
[flags]", DisableFlagsInUseLine: true, - Short: i18n.T("Display one or many resources"), - Long: getLong + "\n\n" + cmdutil.SuggestApiResources(parent), - Example: getExample, + Short: i18n.T("Display one or many resources"), + Long: getLong + "\n\n" + cmdutil.SuggestApiResources(parent), + Example: getExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate(cmd)) @@ -314,11 +314,14 @@ type RuntimeSorter struct { } func (r *RuntimeSorter) Sort() error { - if len(r.objects) <= 1 { - // a list is only considered "sorted" if there are 0 or 1 items in it - // AND (if 1 item) the item is not a Table object + // a list is only considered "sorted" if there are 0 or 1 items in it + // AND (if 1 item) the item is not a Table object + if len(r.objects) == 0 { + return nil + } + if len(r.objects) == 1 { _, isTable := r.objects[0].(*metav1beta1.Table) - if len(r.objects) == 0 || !isTable { + if !isTable { return nil } } @@ -331,8 +334,10 @@ func (r *RuntimeSorter) Sort() error { case *metav1beta1.Table: includesTable = true - if err := kubectl.NewTableSorter(t, r.field).Sort(); err != nil { - continue + if sorter, err := NewTableSorter(t, r.field); err != nil { + return err + } else if err := sorter.Sort(); err != nil { + return err } default: includesRuntimeObjs = true @@ -354,7 +359,7 @@ func (r *RuntimeSorter) Sort() error { // if not dealing with a Table response from the server, assume // all objects are runtime.Object as usual, and sort using old method. var err error - if r.positioner, err = kubectl.SortObjects(r.decoder, r.objects, r.field); err != nil { + if r.positioner, err = SortObjects(r.decoder, r.objects, r.field); err != nil { return err } return nil @@ -374,14 +379,14 @@ func (r *RuntimeSorter) WithDecoder(decoder runtime.Decoder) *RuntimeSorter { } func NewRuntimeSorter(objects []runtime.Object, sortBy string) *RuntimeSorter { - parsedField, err := printers.RelaxedJSONPathExpression(sortBy) + parsedField, err := RelaxedJSONPathExpression(sortBy) if err != nil { parsedField = sortBy } return &RuntimeSorter{ field: parsedField, - decoder: cmdutil.InternalVersionDecoder(), + decoder: legacyscheme.Codecs.UniversalDecoder(), objects: objects, } } @@ -471,7 +476,7 @@ func (o *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e } else { // if we are unable to decode server response into a v1beta1.Table, // fallback to client-side printing with whatever info the server returned. - glog.V(2).Infof("Unable to decode server response into a Table. Falling back to hardcoded types: %v", err) + klog.V(2).Infof("Unable to decode server response into a Table. Falling back to hardcoded types: %v", err) } } @@ -495,7 +500,7 @@ func (o *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e var printer printers.ResourcePrinter var lastMapping *meta.RESTMapping nonEmptyObjCount := 0 - w := printers.GetNewTabWriter(o.Out) + w := utilprinters.GetNewTabWriter(o.Out) for ix := range objs { var mapping *meta.RESTMapping var info *resource.Info @@ -555,7 +560,7 @@ func (o *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e internalObj, err := legacyscheme.Scheme.ConvertToVersion(info.Object, info.Mapping.GroupVersionKind.GroupKind().WithVersion(runtime.APIVersionInternal).GroupVersion()) if err != nil { // if there's an error, try to print what you have (mirrors old behavior). 
- glog.V(1).Info(err) + klog.V(1).Info(err) printer.PrintObj(info.Object, w) } else { printer.PrintObj(internalObj, w) @@ -645,7 +650,7 @@ func (o *GetOptions) watch(f cmdutil.Factory, cmd *cobra.Command, args []string) // print the current object if !o.WatchOnly { var objsToPrint []runtime.Object - writer := printers.GetNewTabWriter(o.Out) + writer := utilprinters.GetNewTabWriter(o.Out) if isList { objsToPrint, _ = meta.ExtractList(obj) @@ -704,7 +709,7 @@ func (o *GetOptions) watch(f cmdutil.Factory, cmd *cobra.Command, args []string) func attemptToConvertToInternal(obj runtime.Object, converter runtime.ObjectConvertor, targetVersion schema.GroupVersion) runtime.Object { internalObject, err := converter.ConvertToVersion(obj, targetVersion) if err != nil { - glog.V(1).Infof("Unable to convert %T to %v: %v", obj, targetVersion, err) + klog.V(1).Infof("Unable to convert %T to %v: %v", obj, targetVersion, err) return obj } return internalObject @@ -768,8 +773,8 @@ func (o *GetOptions) printGeneric(r *resource.Result) error { // we have more than one item, so coerce all items into a list. // we don't want an *unstructured.Unstructured list yet, as we // may be dealing with non-unstructured objects. Compose all items - // into an api.List, and then decode using an unstructured scheme. - list := api.List{ + // into an corev1.List, and then decode using an unstructured scheme. + list := corev1.List{ TypeMeta: metav1.TypeMeta{ Kind: "List", APIVersion: "v1", @@ -777,7 +782,7 @@ func (o *GetOptions) printGeneric(r *resource.Result) error { ListMeta: metav1.ListMeta{}, } for _, info := range infos { - list.Items = append(list.Items, info.Object) + list.Items = append(list.Items, runtime.RawExtension{Object: info.Object}) } listData, err := json.Marshal(list) @@ -852,7 +857,7 @@ func cmdSpecifiesOutputFmt(cmd *cobra.Command) bool { func maybeWrapSortingPrinter(printer printers.ResourcePrinter, sortBy string) printers.ResourcePrinter { if len(sortBy) != 0 { - return &kubectl.SortingPrinter{ + return &SortingPrinter{ Delegate: printer, SortField: fmt.Sprintf("%s", sortBy), } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get_flags.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get_flags.go index 4bfeafd5b2a60..93700a85524f4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get_flags.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get_flags.go @@ -24,8 +24,8 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" - "k8s.io/kubernetes/pkg/printers" ) // PrintFlags composes common printer flag structs @@ -33,7 +33,7 @@ import ( type PrintFlags struct { JSONYamlPrintFlags *genericclioptions.JSONYamlPrintFlags NamePrintFlags *genericclioptions.NamePrintFlags - CustomColumnsFlags *printers.CustomColumnsPrintFlags + CustomColumnsFlags *CustomColumnsPrintFlags HumanReadableFlags *HumanPrintFlags TemplateFlags *genericclioptions.KubeTemplatePrintFlags @@ -185,6 +185,6 @@ func NewGetPrintFlags() *PrintFlags { TemplateFlags: genericclioptions.NewKubeTemplatePrintFlags(), HumanReadableFlags: NewHumanPrintFlags(), - CustomColumnsFlags: printers.NewCustomColumnsPrintFlags(), + CustomColumnsFlags: NewCustomColumnsPrintFlags(), } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/sorter.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/sorter.go similarity index 82% rename from 
vendor/k8s.io/kubernetes/pkg/kubectl/sorter.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/sorter.go index b7c339dfb2bbf..d806decd19a9b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/sorter.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/sorter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package get import ( "fmt" @@ -22,22 +22,22 @@ import ( "reflect" "sort" - "github.com/golang/glog" + "k8s.io/klog" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/client-go/util/integer" "k8s.io/client-go/util/jsonpath" - "k8s.io/kubernetes/pkg/printers" "vbom.ml/util/sortorder" ) -// Sorting printer sorts list types before delegating to another printer. +// SortingPrinter sorts list types before delegating to another printer. // Non-list types are simply passed through type SortingPrinter struct { SortField string @@ -71,7 +71,7 @@ func (s *SortingPrinter) sortObj(obj runtime.Object) error { } switch list := obj.(type) { - case *v1.List: + case *corev1.List: outputList := make([]runtime.RawExtension, len(objs)) for ix := range objs { outputList[ix] = list.Items[sorter.OriginalPosition(ix)] @@ -96,7 +96,7 @@ func SortObjects(decoder runtime.Decoder, objs []runtime.Object, fieldInput stri } } - field, err := printers.RelaxedJSONPathExpression(fieldInput) + field, err := RelaxedJSONPathExpression(fieldInput) if err != nil { return nil, err } @@ -272,12 +272,12 @@ func (r *RuntimeSort) Less(i, j int) bool { iValues, err = findJSONPathResults(parser, iObj) if err != nil { - glog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err) + klog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err) } jValues, err = findJSONPathResults(parser, jObj) if err != nil { - glog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err) + klog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err) } if len(iValues) == 0 || len(iValues[0]) == 0 { @@ -291,12 +291,13 @@ func (r *RuntimeSort) Less(i, j int) bool { less, err := isLess(iField, jField) if err != nil { - glog.Fatalf("Field %s in %T is an unsortable type: %s, err: %v", r.field, iObj, iField.Kind().String(), err) + klog.Fatalf("Field %s in %T is an unsortable type: %s, err: %v", r.field, iObj, iField.Kind().String(), err) } return less } -// Returns the starting (original) position of a particular index. e.g. If OriginalPosition(0) returns 5 than the +// OriginalPosition returns the starting (original) position of a particular index. +// e.g. If OriginalPosition(0) returns 5 than the // the item currently at position 0 was at position 5 in the original unsorted array. 
func (r *RuntimeSort) OriginalPosition(ix int) int { if ix < 0 || ix > len(r.origPosition) { @@ -306,8 +307,9 @@ func (r *RuntimeSort) OriginalPosition(ix int) int { } type TableSorter struct { - field string - obj *metav1beta1.Table + field string + obj *metav1beta1.Table + parsedRows [][][]reflect.Value } func (t *TableSorter) Len() int { @@ -316,37 +318,18 @@ func (t *TableSorter) Len() int { func (t *TableSorter) Swap(i, j int) { t.obj.Rows[i], t.obj.Rows[j] = t.obj.Rows[j], t.obj.Rows[i] + t.parsedRows[i], t.parsedRows[j] = t.parsedRows[j], t.parsedRows[i] } func (t *TableSorter) Less(i, j int) bool { - iObj := t.obj.Rows[i].Object.Object - jObj := t.obj.Rows[j].Object.Object + iValues := t.parsedRows[i] + jValues := t.parsedRows[j] - var iValues [][]reflect.Value - var jValues [][]reflect.Value - var err error - - parser := jsonpath.New("sorting").AllowMissingKeys(true) - err = parser.Parse(t.field) - if err != nil { - glog.Fatalf("sorting error: %v\n", err) - } - - // TODO(juanvallejo): this is expensive for very large sets. - // To improve runtime complexity, build an array which contains all - // resolved fields, and sort that instead. - iValues, err = findJSONPathResults(parser, iObj) - if err != nil { - glog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, t.field, err) - } - - jValues, err = findJSONPathResults(parser, jObj) - if err != nil { - glog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, t.field, err) + if len(iValues) == 0 || len(iValues[0]) == 0 { + return true } - - if len(iValues) == 0 || len(iValues[0]) == 0 || len(jValues) == 0 || len(jValues[0]) == 0 { - glog.Fatalf("couldn't find any field with path %q in the list of objects", t.field) + if len(jValues) == 0 || len(jValues[0]) == 0 { + return false } iField := iValues[0][0] @@ -354,7 +337,7 @@ func (t *TableSorter) Less(i, j int) bool { less, err := isLess(iField, jField) if err != nil { - glog.Fatalf("Field %s in %T is an unsortable type: %s, err: %v", t.field, iObj, iField.Kind().String(), err) + klog.Fatalf("Field %s in %T is an unsortable type: %s, err: %v", t.field, t.parsedRows, iField.Kind().String(), err) } return less } @@ -364,17 +347,40 @@ func (t *TableSorter) Sort() error { return nil } -func NewTableSorter(table *metav1beta1.Table, field string) *TableSorter { - return &TableSorter{ - obj: table, - field: field, +func NewTableSorter(table *metav1beta1.Table, field string) (*TableSorter, error) { + var parsedRows [][][]reflect.Value + + parser := jsonpath.New("sorting").AllowMissingKeys(true) + err := parser.Parse(field) + if err != nil { + return nil, fmt.Errorf("sorting error: %v", err) + } + + fieldFoundOnce := false + for i := range table.Rows { + parsedRow, err := findJSONPathResults(parser, table.Rows[i].Object.Object) + if err != nil { + return nil, fmt.Errorf("Failed to get values for %#v using %s (%#v)", parsedRow, field, err) + } + parsedRows = append(parsedRows, parsedRow) + if len(parsedRow) > 0 && len(parsedRow[0]) > 0 { + fieldFoundOnce = true + } } -} + if len(table.Rows) > 0 && !fieldFoundOnce { + return nil, fmt.Errorf("couldn't find any field with path %q in the list of objects", field) + } + + return &TableSorter{ + obj: table, + field: field, + parsedRows: parsedRows, + }, nil +} func findJSONPathResults(parser *jsonpath.JSONPath, from runtime.Object) ([][]reflect.Value, error) { if unstructuredObj, ok := from.(*unstructured.Unstructured); ok { return parser.FindResults(unstructuredObj.Object) } - return 
parser.FindResults(reflect.ValueOf(from).Elem().Interface()) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/help.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/help.go deleted file mode 100644 index 783a87c64db2a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/help.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cmd - -import ( - "strings" - - "github.com/spf13/cobra" - - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" - "k8s.io/kubernetes/pkg/kubectl/util/i18n" -) - -var helpLong = templates.LongDesc(i18n.T(` - Help provides help for any command in the application. - Simply type kubectl help [path to command] for full details.`)) - -func NewCmdHelp() *cobra.Command { - cmd := &cobra.Command{ - Use: "help [command] | STRING_TO_SEARCH", - DisableFlagsInUseLine: true, - Short: i18n.T("Help about any command"), - Long: helpLong, - - Run: RunHelp, - } - - return cmd -} - -func RunHelp(cmd *cobra.Command, args []string) { - foundCmd, _, err := cmd.Root().Find(args) - - // NOTE(andreykurilin): actually, I did not find any cases when foundCmd can be nil, - // but let's make this check since it is included in original code of initHelpCmd - // from github.com/spf13/cobra - if foundCmd == nil { - cmd.Printf("Unknown help topic %#q.\n", args) - cmd.Root().Usage() - } else if err != nil { - // print error message at first, since it can contain suggestions - cmd.Println(err) - - argsString := strings.Join(args, " ") - var matchedMsgIsPrinted bool = false - for _, foundCmd := range foundCmd.Commands() { - if strings.Contains(foundCmd.Short, argsString) { - if !matchedMsgIsPrinted { - cmd.Printf("Matchers of string '%s' in short descriptions of commands: \n", argsString) - matchedMsgIsPrinted = true - } - cmd.Printf(" %-14s %s\n", foundCmd.Name(), foundCmd.Short) - } - } - - if !matchedMsgIsPrinted { - // if nothing is found, just print usage - cmd.Root().Usage() - } - } else { - if len(args) == 0 { - // help message for help command :) - foundCmd = cmd - } - helpFunc := foundCmd.HelpFunc() - helpFunc(foundCmd, args) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label/BUILD.bazel new file mode 100644 index 0000000000000..e68b1ab088869 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["label.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/label", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/evanphx/json-patch:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label/label.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label/label.go index 4aedac04ef144..11251a278d21d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/label/label.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package label import ( "fmt" @@ -22,8 +22,8 @@ import ( "strings" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,10 +37,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // LabelOptions have the data required to perform the label operation @@ -125,11 +125,11 @@ func NewCmdLabel(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr o := NewLabelOptions(ioStreams) cmd := &cobra.Command{ - Use: "label [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--resource-version=version]", + Use: "label [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... 
KEY_N=VAL_N [--resource-version=version]", DisableFlagsInUseLine: true, - Short: i18n.T("Update the labels on a resource"), - Long: fmt.Sprintf(labelLong, validation.LabelValueMaxLength), - Example: labelExample, + Short: i18n.T("Update the labels on a resource"), + Long: fmt.Sprintf(labelLong, validation.LabelValueMaxLength), + Example: labelExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -252,18 +252,25 @@ func (o *LabelOptions) RunLabel() error { } var outputObj runtime.Object - dataChangeMsg := "not labeled" + var dataChangeMsg string + obj := info.Object + oldData, err := json.Marshal(obj) + if err != nil { + return err + } if o.dryrun || o.local || o.list { - err = labelFunc(info.Object, o.overwrite, o.resourceVersion, o.newLabels, o.removeLabels) + err = labelFunc(obj, o.overwrite, o.resourceVersion, o.newLabels, o.removeLabels) + if err != nil { + return err + } + newObj, err := json.Marshal(obj) if err != nil { return err } - dataChangeMsg = "labeled" + dataChangeMsg = updateDataChangeMsg(oldData, newObj) outputObj = info.Object } else { - obj := info.Object name, namespace := info.Name, info.Namespace - oldData, err := json.Marshal(obj) if err != nil { return err } @@ -281,19 +288,17 @@ func (o *LabelOptions) RunLabel() error { return err } if err := o.Recorder.Record(obj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } - newData, err := json.Marshal(obj) + newObj, err := json.Marshal(obj) if err != nil { return err } - if !reflect.DeepEqual(oldData, newData) { - dataChangeMsg = "labeled" - } - patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + dataChangeMsg = updateDataChangeMsg(oldData, newObj) + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newObj) createdPatch := err == nil if err != nil { - glog.V(2).Infof("couldn't compute patch: %v", err) + klog.V(2).Infof("couldn't compute patch: %v", err) } mapping := info.ResourceMapping() @@ -344,6 +349,14 @@ func (o *LabelOptions) RunLabel() error { }) } +func updateDataChangeMsg(oldObj []byte, newObj []byte) string { + msg := "not labeled" + if !reflect.DeepEqual(oldObj, newObj) { + msg = "labeled" + } + return msg +} + func validateNoOverwrites(accessor metav1.Object, labels map[string]string) error { allErrs := []error{} for key := range labels { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs/BUILD.bazel new file mode 100644 index 0000000000000..066be412c95dc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["logs.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/logs", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + 
"//vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs/logs.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs/logs.go index 5f727c29b1376..5f8f3162b19b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs/logs.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package logs import ( "errors" @@ -30,12 +30,12 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) const ( @@ -75,6 +75,10 @@ var ( logsUsageErrStr = fmt.Sprintf("expected '%s'.\nPOD or TYPE/NAME is a required argument for the logs command", logsUsageStr) ) +const ( + defaultPodLogsTimeout = 20 * time.Second +) + type LogsOptions struct { Namespace string ResourceArg string @@ -119,14 +123,14 @@ func NewCmdLogs(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C o := NewLogsOptions(streams, false) cmd := &cobra.Command{ - Use: logsUsageStr, + Use: logsUsageStr, DisableFlagsInUseLine: true, - Short: i18n.T("Print the logs for a container in a pod"), - Long: "Print the logs for a container in a pod or specified resource. If the pod has only one container, the container name is optional.", - Example: logsExample, + Short: i18n.T("Print the logs for a container in a pod"), + Long: "Print the logs for a container in a pod or specified resource. If the pod has only one container, the container name is optional.", + Example: logsExample, PreRun: func(cmd *cobra.Command, args []string) { if len(os.Args) > 1 && os.Args[1] == "log" { - printDeprecationWarning(o.ErrOut, "logs", "log") + fmt.Fprintf(o.ErrOut, "%s is DEPRECATED and will be removed in a future version. 
Use %s instead.\n", "log", "logs") } }, Run: func(cmd *cobra.Command, args []string) { @@ -177,7 +181,7 @@ func (o *LogsOptions) ToLogOptions() (*corev1.PodLogOptions, error) { logOptions.SinceSeconds = &sec } - if len(o.Selector) > 0 && o.Tail != -1 { + if len(o.Selector) > 0 && o.Tail == -1 { logOptions.TailLines = &selectorTail } else if o.Tail != -1 { logOptions.TailLines = &o.Tail diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options/BUILD.bazel new file mode 100644 index 0000000000000..a66439947db2d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["options.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/options", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options/options.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options/options.go index 77230675ad6d9..2b24f2140fe10 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/options/options.go @@ -14,13 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package options import ( "io" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "github.com/spf13/cobra" ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch/BUILD.bazel new file mode 100644 index 0000000000000..4bb9925da2e3c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["patch.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/patch", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/evanphx/json-patch:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch/patch.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch/patch.go index e5eb13b38bda6..cb8eb0c8c0ba9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/patch/patch.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package patch import ( "fmt" @@ -22,8 +22,8 @@ import ( "strings" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -36,10 +36,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var patchTypes = map[string]types.PatchType{"json": types.JSONPatchType, "merge": types.MergePatchType, "strategic": types.StrategicMergePatchType} @@ -107,11 +107,11 @@ func NewCmdPatch(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr o := NewPatchOptions(ioStreams) cmd := &cobra.Command{ - Use: "patch (-f FILENAME | TYPE NAME) -p PATCH", + Use: "patch (-f FILENAME | TYPE NAME) -p PATCH", DisableFlagsInUseLine: true, - Short: i18n.T("Update field(s) of a resource using strategic merge patch"), - Long: patchLong, - Example: patchExample, + Short: i18n.T("Update field(s) of a resource using strategic merge patch"), + Long: patchLong, + Example: patchExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -229,10 +229,10 @@ func (o *PatchOptions) RunPatch() error { // if the recorder makes a change, compute and create another patch if mergePatch, err := o.Recorder.MakeRecordMergePatch(patchedObj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } else if len(mergePatch) > 0 { if recordedObj, err := helper.Patch(info.Namespace, info.Name, types.MergePatchType, mergePatch, nil); err != nil { - glog.V(4).Infof("error recording reason: %v", err) + klog.V(4).Infof("error recording reason: %v", err) } else { patchedObj = recordedObj } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin/BUILD.bazel new file mode 100644 index 0000000000000..fb3bf134b037a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["plugin.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/plugin", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin/plugin.go similarity index 85% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin/plugin.go index 76b3aa895924a..528ede0e70ca6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin/plugin.go @@ -14,9 
+14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package plugin import ( + "bytes" "fmt" "io/ioutil" "os" @@ -26,11 +27,10 @@ import ( "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -52,10 +52,10 @@ var ( func NewCmdPlugin(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { cmd := &cobra.Command{ - Use: "plugin [flags]", + Use: "plugin [flags]", DisableFlagsInUseLine: true, - Short: i18n.T("Provides utilities for interacting with plugins."), - Long: plugin_long, + Short: i18n.T("Provides utilities for interacting with plugins."), + Long: plugin_long, Run: func(cmd *cobra.Command, args []string) { cmdutil.DefaultSubCommandRun(streams.ErrOut)(cmd, args) }, @@ -108,11 +108,12 @@ func (o *PluginListOptions) Run() error { pluginsFound := false isFirstFile := true + pluginErrors := []error{} pluginWarnings := 0 - paths := sets.NewString(filepath.SplitList(os.Getenv(path))...) - for _, dir := range paths.List() { + for _, dir := range filepath.SplitList(os.Getenv(path)) { files, err := ioutil.ReadDir(dir) if err != nil { + pluginErrors = append(pluginErrors, fmt.Errorf("error: unable to read directory %q in your PATH: %v", dir, err)) continue } @@ -146,15 +147,23 @@ func (o *PluginListOptions) Run() error { } if !pluginsFound { - return fmt.Errorf("error: unable to find any kubectl plugins in your PATH") + pluginErrors = append(pluginErrors, fmt.Errorf("error: unable to find any kubectl plugins in your PATH")) } if pluginWarnings > 0 { - fmt.Fprintln(o.ErrOut) if pluginWarnings == 1 { - return fmt.Errorf("one plugin warning was found") + pluginErrors = append(pluginErrors, fmt.Errorf("error: one plugin warning was found")) + } else { + pluginErrors = append(pluginErrors, fmt.Errorf("error: %v plugin warnings were found", pluginWarnings)) + } + } + if len(pluginErrors) > 0 { + fmt.Fprintln(o.ErrOut) + errs := bytes.NewBuffer(nil) + for _, e := range pluginErrors { + fmt.Fprintln(errs, e) } - return fmt.Errorf("%v plugin warnings were found", pluginWarnings) + return fmt.Errorf("%s", errs.String()) } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward/BUILD.bazel new file mode 100644 index 0000000000000..d85648896a992 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["portforward.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/portforward", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + 
"//vendor/k8s.io/client-go/tools/portforward:go_default_library", + "//vendor/k8s.io/client-go/transport/spdy:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward/portforward.go similarity index 74% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward/portforward.go index 5c0095bb3b088..4194f4f336b8c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/portforward/portforward.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package portforward import ( "fmt" @@ -36,11 +36,11 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/portforward" "k8s.io/client-go/transport/spdy" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // PortForwardOptions contains all the options for running the port-forward cli command. @@ -50,6 +50,7 @@ type PortForwardOptions struct { RESTClient *restclient.RESTClient Config *restclient.Config PodClient corev1client.PodsGetter + Address []string Ports []string PortForwarder portForwarder StopChannel chan struct{} @@ -79,6 +80,12 @@ var ( # Listen on port 8888 locally, forwarding to 5000 in the pod kubectl port-forward pod/mypod 8888:5000 + # Listen on port 8888 on all addresses, forwarding to 5000 in the pod + kubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000 + + # Listen on port 8888 on localhost and selected IP, forwarding to 5000 in the pod + kubectl port-forward --address localhost,10.19.21.23 pod/mypod 8888:5000 + # Listen on a random port locally, forwarding to 5000 in the pod kubectl port-forward pod/mypod :5000`)) ) @@ -95,11 +102,11 @@ func NewCmdPortForward(f cmdutil.Factory, streams genericclioptions.IOStreams) * }, } cmd := &cobra.Command{ - Use: "port-forward TYPE/NAME [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]", + Use: "port-forward TYPE/NAME [options] [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]", DisableFlagsInUseLine: true, - Short: i18n.T("Forward one or more local ports to a pod"), - Long: portforwardLong, - Example: portforwardExample, + Short: i18n.T("Forward one or more local ports to a pod"), + Long: portforwardLong, + Example: portforwardExample, Run: func(cmd *cobra.Command, args []string) { if err := opts.Complete(f, cmd, args); err != nil { cmdutil.CheckErr(err) @@ -113,6 +120,7 @@ func NewCmdPortForward(f cmdutil.Factory, streams genericclioptions.IOStreams) * }, } cmdutil.AddPodRunningTimeoutFlag(cmd, defaultPodPortForwardWaitTimeout) + cmd.Flags().StringSliceVar(&opts.Address, "address", []string{"localhost"}, "Addresses to listen on (comma separated)") // TODO support UID return cmd } @@ -131,13 +139,24 @@ func (f *defaultPortForwarder) ForwardPorts(method string, 
url *url.URL, opts Po return err } dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, method, url) - fw, err := portforward.New(dialer, opts.Ports, opts.StopChannel, opts.ReadyChannel, f.Out, f.ErrOut) + fw, err := portforward.NewOnAddresses(dialer, opts.Address, opts.Ports, opts.StopChannel, opts.ReadyChannel, f.Out, f.ErrOut) if err != nil { return err } return fw.ForwardPorts() } +// splitPort splits port string which is in form of [LOCAL PORT]:REMOTE PORT +// and returns local and remote ports separately +func splitPort(port string) (local, remote string) { + parts := strings.Split(port, ":") + if len(parts) == 2 { + return parts[0], parts[1] + } + + return parts[0], parts[0] +} + // Translates service port to target port // It rewrites ports as needed if the Service port declares targetPort. // It returns an error when a named targetPort can't find a match in the pod, or the Service did not declare @@ -145,31 +164,63 @@ func (f *defaultPortForwarder) ForwardPorts(method string, url *url.URL, opts Po func translateServicePortToTargetPort(ports []string, svc corev1.Service, pod corev1.Pod) ([]string, error) { var translated []string for _, port := range ports { - // port is in the form of [LOCAL PORT]:REMOTE PORT - parts := strings.Split(port, ":") - input := parts[0] - if len(parts) == 2 { - input = parts[1] - } - portnum, err := strconv.Atoi(input) + localPort, remotePort := splitPort(port) + + portnum, err := strconv.Atoi(remotePort) if err != nil { - return ports, err + svcPort, err := util.LookupServicePortNumberByName(svc, remotePort) + if err != nil { + return nil, err + } + portnum = int(svcPort) + + if localPort == remotePort { + localPort = strconv.Itoa(portnum) + } } containerPort, err := util.LookupContainerPortNumberByServicePort(svc, pod, int32(portnum)) if err != nil { // can't resolve a named port, or Service did not declare this port, return an error return nil, err + } + + if int32(portnum) != containerPort { + translated = append(translated, fmt.Sprintf("%s:%d", localPort, containerPort)) } else { - if int32(portnum) != containerPort { - translated = append(translated, fmt.Sprintf("%s:%d", parts[0], containerPort)) - } else { - translated = append(translated, port) - } + translated = append(translated, port) } } return translated, nil } +// convertPodNamedPortToNumber converts named ports into port numbers +// It returns an error when a named port can't be found in the pod containers +func convertPodNamedPortToNumber(ports []string, pod corev1.Pod) ([]string, error) { + var converted []string + for _, port := range ports { + localPort, remotePort := splitPort(port) + + containerPortStr := remotePort + _, err := strconv.Atoi(remotePort) + if err != nil { + containerPort, err := util.LookupContainerPortNumberByName(pod, remotePort) + if err != nil { + return nil, err + } + + containerPortStr = strconv.Itoa(int(containerPort)) + } + + if localPort != remotePort { + converted = append(converted, fmt.Sprintf("%s:%s", localPort, containerPortStr)) + } else { + converted = append(converted, containerPortStr) + } + } + + return converted, nil +} + // Complete completes all the required options for port-forward cmd. 
func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error @@ -215,7 +266,10 @@ func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg return err } default: - o.Ports = args[1:] + o.Ports, err = convertPodNamedPortToNumber(args[1:], *forwardablePod) + if err != nil { + return err + } } clientset, err := f.KubernetesClientSet() diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/profiling.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/profiling.go new file mode 100644 index 0000000000000..2a1c1ce3c152d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/profiling.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "fmt" + "os" + "runtime" + "runtime/pprof" + + "github.com/spf13/pflag" +) + +var ( + profileName string + profileOutput string +) + +func addProfilingFlags(flags *pflag.FlagSet) { + flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex)") + flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to") +} + +func initProfiling() error { + switch profileName { + case "none": + return nil + case "cpu": + f, err := os.Create(profileOutput) + if err != nil { + return err + } + return pprof.StartCPUProfile(f) + // Block and mutex profiles need a call to Set{Block,Mutex}ProfileRate to + // output anything. We choose to sample all events. + case "block": + runtime.SetBlockProfileRate(1) + return nil + case "mutex": + runtime.SetMutexProfileFraction(1) + return nil + default: + // Check the profile name is valid. 
+ if profile := pprof.Lookup(profileName); profile == nil { + return fmt.Errorf("unknown profile '%s'", profileName) + } + } + + return nil +} + +func flushProfiling() error { + switch profileName { + case "none": + return nil + case "cpu": + pprof.StopCPUProfile() + case "heap": + runtime.GC() + fallthrough + default: + profile := pprof.Lookup(profileName) + if profile == nil { + return nil + } + f, err := os.Create(profileOutput) + if err != nil { + return err + } + profile.WriteTo(f, 0) + } + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy/BUILD.bazel new file mode 100644 index 0000000000000..765730ffde16f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["proxy.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/proxy", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/proxy:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy/proxy.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy/proxy.go index e2daa184d253e..bf54bb21fb86d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/proxy/proxy.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package proxy import ( "errors" @@ -24,21 +24,21 @@ import ( "os" "strings" - "github.com/golang/glog" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/klog" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/proxy" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( defaultPort = 8001 proxyLong = templates.LongDesc(i18n.T(` - Creates a proxy server or application-level gateway between localhost and - the Kubernetes API Server. It also allows serving static content over specified - HTTP path. All incoming data enters through one port and gets forwarded to + Creates a proxy server or application-level gateway between localhost and + the Kubernetes API Server. It also allows serving static content over specified + HTTP path. 
All incoming data enters through one port and gets forwarded to the remote kubernetes API Server port, except for the path matching the static content path.`)) proxyExample = templates.Examples(i18n.T(` @@ -72,11 +72,11 @@ var ( func NewCmdProxy(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { cmd := &cobra.Command{ - Use: "proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-prefix=prefix]", + Use: "proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-prefix=prefix]", DisableFlagsInUseLine: true, - Short: i18n.T("Run a proxy to the Kubernetes API server"), - Long: proxyLong, - Example: proxyExample, + Short: i18n.T("Run a proxy to the Kubernetes API server"), + Long: proxyLong, + Example: proxyExample, Run: func(cmd *cobra.Command, args []string) { err := RunProxy(f, streams.Out, cmd) cmdutil.CheckErr(err) @@ -119,9 +119,9 @@ func RunProxy(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error { if staticDir != "" { fileInfo, err := os.Stat(staticDir) if err != nil { - glog.Warning("Failed to stat static file directory "+staticDir+": ", err) + klog.Warning("Failed to stat static file directory "+staticDir+": ", err) } else if !fileInfo.IsDir() { - glog.Warning("Static file directory " + staticDir + " is not a directory") + klog.Warning("Static file directory " + staticDir + " is not a directory") } } @@ -137,7 +137,7 @@ func RunProxy(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error { } if cmdutil.GetFlagBool(cmd, "disable-filter") { if path == "" { - glog.Warning("Request filter disabled, your proxy is vulnerable to XSRF attacks, please be cautious") + klog.Warning("Request filter disabled, your proxy is vulnerable to XSRF attacks, please be cautious") } filter = nil } @@ -155,9 +155,9 @@ func RunProxy(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error { l, err = server.ListenUnix(path) } if err != nil { - glog.Fatal(err) + klog.Fatal(err) } fmt.Fprintf(out, "Starting to serve on %s\n", l.Addr().String()) - glog.Fatal(server.ServeOnListener(l)) + klog.Fatal(server.ServeOnListener(l)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace/BUILD.bazel new file mode 100644 index 0000000000000..8c239ebc58e08 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["replace.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/replace", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + 
"//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/validation:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace/replace.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace/replace.go index d5ed3379f6501..dcc8ddcb87818 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/replace/replace.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package replace import ( "fmt" @@ -25,7 +25,7 @@ import ( "github.com/spf13/cobra" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -33,9 +33,11 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/kubernetes/pkg/kubectl/cmd/delete" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/kubernetes/pkg/kubectl/validation" ) @@ -66,10 +68,10 @@ var ( type ReplaceOptions struct { PrintFlags *genericclioptions.PrintFlags - DeleteFlags *DeleteFlags RecordFlags *genericclioptions.RecordFlags - DeleteOptions *DeleteOptions + DeleteFlags *delete.DeleteFlags + DeleteOptions *delete.DeleteOptions PrintObj func(obj runtime.Object) error @@ -91,7 +93,7 @@ type ReplaceOptions struct { func NewReplaceOptions(streams genericclioptions.IOStreams) *ReplaceOptions { return &ReplaceOptions{ PrintFlags: genericclioptions.NewPrintFlags("replaced"), - DeleteFlags: NewDeleteFlags("to use to replace the resource."), + DeleteFlags: delete.NewDeleteFlags("to use to replace the resource."), IOStreams: streams, } @@ -101,11 +103,11 @@ func NewCmdReplace(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobr o := NewReplaceOptions(streams) cmd := &cobra.Command{ - Use: "replace -f FILENAME", + Use: "replace -f FILENAME", DisableFlagsInUseLine: true, - Short: i18n.T("Replace a resource by filename or stdin"), - Long: replaceLong, - Example: replaceExample, + Short: i18n.T("Replace a resource by filename or stdin"), + Long: replaceLong, + Example: replaceExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate(cmd)) @@ -218,12 +220,12 @@ func (o *ReplaceOptions) Run() error { return err } - if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, info.Object, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, info.Object, scheme.DefaultJSONEncoder()); err != nil { return cmdutil.AddSourceToErr("replacing", info.Source, err) } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } // Serialize the object with the annotation applied. 
@@ -309,12 +311,12 @@ func (o *ReplaceOptions) forceReplace() error { return err } - if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, info.Object, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, info.Object, scheme.DefaultJSONEncoder()); err != nil { return err } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object, nil) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate/BUILD.bazel new file mode 100644 index 0000000000000..8ca7b3a87db61 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["rollingupdate.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/validation:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate/rollingupdate.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate/rollingupdate.go index 01250e51f6d64..b39ad2049f6e1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate/rollingupdate.go @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package rollingupdate import ( "bytes" "fmt" "time" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1" @@ -36,11 +36,11 @@ import ( "k8s.io/client-go/kubernetes" scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/kubernetes/pkg/kubectl/validation" ) @@ -72,10 +72,10 @@ var ( kubectl rolling-update frontend-v1 frontend-v2 --rollback`)) ) -var ( - updatePeriod, _ = time.ParseDuration("1m0s") - timeout, _ = time.ParseDuration("5m0s") - pollInterval, _ = time.ParseDuration("3s") +const ( + updatePeriod = 1 * time.Minute + timeout = 5 * time.Minute + pollInterval = 3 * time.Second ) type RollingUpdateOptions struct { @@ -129,13 +129,13 @@ func NewCmdRollingUpdate(f cmdutil.Factory, ioStreams genericclioptions.IOStream o := NewRollingUpdateOptions(ioStreams) cmd := &cobra.Command{ - Use: "rolling-update OLD_CONTROLLER_NAME ([NEW_CONTROLLER_NAME] --image=NEW_CONTAINER_IMAGE | -f NEW_CONTROLLER_SPEC)", + Use: "rolling-update OLD_CONTROLLER_NAME ([NEW_CONTROLLER_NAME] --image=NEW_CONTAINER_IMAGE | -f NEW_CONTROLLER_SPEC)", DisableFlagsInUseLine: true, - Short: "Perform a rolling update. This command is deprecated, use rollout instead.", - Long: rollingUpdateLong, - Example: rollingUpdateExample, - Deprecated: `use "rollout" instead`, - Hidden: true, + Short: "Perform a rolling update. This command is deprecated, use rollout instead.", + Long: rollingUpdateLong, + Example: rollingUpdateExample, + Deprecated: `use "rollout" instead`, + Hidden: true, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate(cmd, args)) @@ -292,7 +292,7 @@ func (o *RollingUpdateOptions) Run() error { uncastVersionedObj, err := scheme.Scheme.ConvertToVersion(infos[0].Object, corev1.SchemeGroupVersion) if err != nil { - glog.V(4).Infof("Object %T is not a ReplicationController", infos[0].Object) + klog.V(4).Infof("Object %T is not a ReplicationController", infos[0].Object) return fmt.Errorf("%s contains a %v not a ReplicationController", filename, infos[0].Object.GetObjectKind().GroupVersionKind()) } switch t := uncastVersionedObj.(type) { @@ -301,7 +301,7 @@ func (o *RollingUpdateOptions) Run() error { newRc = t } if newRc == nil { - glog.V(4).Infof("Object %T is not a ReplicationController", infos[0].Object) + klog.V(4).Infof("Object %T is not a ReplicationController", infos[0].Object) return fmt.Errorf("%s contains a %v not a ReplicationController", filename, infos[0].Object.GetObjectKind().GroupVersionKind()) } } @@ -403,10 +403,10 @@ func (o *RollingUpdateOptions) Run() error { if err != nil { return err } - if err := printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(oldRc, nil), oldRcData); err != nil { + if err := printer.PrintObj(oldRc, oldRcData); err != nil { return err } - if err := printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(newRc, nil), newRcData); err != nil { + if err := printer.PrintObj(newRc, newRcData); err != nil { return err } } @@ -455,7 +455,7 @@ func (o *RollingUpdateOptions) Run() error { if err != nil { return err } - return printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(newRc, nil), o.Out) + return printer.PrintObj(newRc, o.Out) } 
func findNewName(args []string, oldRc *corev1.ReplicationController) string { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/BUILD.bazel index 16444b855e13e..6d05b580e97a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/BUILD.bazel @@ -31,14 +31,13 @@ go_library( "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/watch:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", "//vendor/k8s.io/kubernetes/pkg/util/interrupt:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout.go index d62c79ae62cca..38b2c5297535f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout.go @@ -21,9 +21,9 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -48,12 +48,12 @@ var ( func NewCmdRollout(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { cmd := &cobra.Command{ - Use: "rollout SUBCOMMAND", + Use: "rollout SUBCOMMAND", DisableFlagsInUseLine: true, - Short: i18n.T("Manage the rollout of a resource"), - Long: rollout_long, - Example: rollout_example, - Run: cmdutil.DefaultSubCommandRun(streams.Out), + Short: i18n.T("Manage the rollout of a resource"), + Long: rollout_long, + Example: rollout_example, + Run: cmdutil.DefaultSubCommandRun(streams.Out), } // subcommands cmd.AddCommand(NewCmdRolloutHistory(f, streams)) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_history.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_history.go index e58c9262fde37..009c8d76ca4e4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_history.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_history.go @@ -24,11 +24,11 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -74,11 +74,11 @@ func NewCmdRolloutHistory(f cmdutil.Factory, streams genericclioptions.IOStreams validArgs := []string{"deployment", "daemonset", "statefulset"} cmd := &cobra.Command{ - Use: "history (TYPE NAME | 
TYPE/NAME) [flags]", + Use: "history (TYPE NAME | TYPE/NAME) [flags]", DisableFlagsInUseLine: true, - Short: i18n.T("View rollout history"), - Long: history_long, - Example: history_example, + Short: i18n.T("View rollout history"), + Long: history_long, + Example: history_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -168,6 +168,6 @@ func (o *RolloutHistoryOptions) Run() error { return err } - return printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out) + return printer.PrintObj(info.Object, o.Out) }) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_pause.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_pause.go index 34f2616b67aa2..70c5af4a69806 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -26,13 +26,12 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl/cmd/set" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // PauseOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of @@ -75,11 +74,11 @@ func NewCmdRolloutPause(f cmdutil.Factory, streams genericclioptions.IOStreams) validArgs := []string{"deployment"} cmd := &cobra.Command{ - Use: "pause RESOURCE", + Use: "pause RESOURCE", DisableFlagsInUseLine: true, - Short: i18n.T("Mark the provided resource as paused"), - Long: pause_long, - Example: pause_example, + Short: i18n.T("Mark the provided resource as paused"), + Long: pause_long, + Example: pause_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -124,7 +123,7 @@ func (o *PauseOptions) Validate() error { func (o PauseOptions) RunPause() error { r := o.Builder(). - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). NamespaceParam(o.Namespace).DefaultNamespace(). FilenameParam(o.EnforceNamespace, &o.FilenameOptions). ResourceTypeOrNameArgs(true, o.Resources...). 
@@ -147,7 +146,7 @@ func (o PauseOptions) RunPause() error { allErrs = append(allErrs, err) } - for _, patch := range set.CalculatePatches(infos, cmdutil.InternalVersionJSONEncoder(), set.PatchFn(o.Pauser)) { + for _, patch := range set.CalculatePatches(infos, scheme.DefaultJSONEncoder(), set.PatchFn(o.Pauser)) { info := patch.Info if patch.Err != nil { @@ -165,7 +164,7 @@ func (o PauseOptions) RunPause() error { allErrs = append(allErrs, err) continue } - if err = printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out); err != nil { + if err = printer.PrintObj(info.Object, o.Out); err != nil { allErrs = append(allErrs, err) } continue @@ -183,7 +182,7 @@ func (o PauseOptions) RunPause() error { allErrs = append(allErrs, err) continue } - if err = printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out); err != nil { + if err = printer.PrintObj(info.Object, o.Out); err != nil { allErrs = append(allErrs, err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_resume.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_resume.go index 8e518937bff39..e97c807173c0f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -26,13 +26,12 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl/cmd/set" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // ResumeOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of @@ -78,11 +77,11 @@ func NewCmdRolloutResume(f cmdutil.Factory, streams genericclioptions.IOStreams) validArgs := []string{"deployment"} cmd := &cobra.Command{ - Use: "resume RESOURCE", + Use: "resume RESOURCE", DisableFlagsInUseLine: true, - Short: i18n.T("Resume a paused resource"), - Long: resume_long, - Example: resume_example, + Short: i18n.T("Resume a paused resource"), + Long: resume_long, + Example: resume_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -127,7 +126,7 @@ func (o *ResumeOptions) Validate() error { func (o ResumeOptions) RunResume() error { r := o.Builder(). - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). NamespaceParam(o.Namespace).DefaultNamespace(). FilenameParam(o.EnforceNamespace, &o.FilenameOptions). ResourceTypeOrNameArgs(true, o.Resources...). 
@@ -150,7 +149,7 @@ func (o ResumeOptions) RunResume() error { allErrs = append(allErrs, err) } - for _, patch := range set.CalculatePatches(infos, cmdutil.InternalVersionJSONEncoder(), set.PatchFn(o.Resumer)) { + for _, patch := range set.CalculatePatches(infos, scheme.DefaultJSONEncoder(), set.PatchFn(o.Resumer)) { info := patch.Info if patch.Err != nil { @@ -168,7 +167,7 @@ func (o ResumeOptions) RunResume() error { allErrs = append(allErrs, err) continue } - if err = printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out); err != nil { + if err = printer.PrintObj(info.Object, o.Out); err != nil { allErrs = append(allErrs, err) } continue @@ -186,7 +185,7 @@ func (o ResumeOptions) RunResume() error { allErrs = append(allErrs, err) continue } - if err = printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out); err != nil { + if err = printer.PrintObj(info.Object, o.Out); err != nil { allErrs = append(allErrs, err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_status.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_status.go index a61242766d77f..311c9c88d9056 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_status.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_status.go @@ -36,11 +36,11 @@ import ( "k8s.io/client-go/tools/cache" watchtools "k8s.io/client-go/tools/watch" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/kubernetes/pkg/util/interrupt" ) @@ -71,9 +71,9 @@ type RolloutStatusOptions struct { Revision int64 Timeout time.Duration - StatusViewer func(*meta.RESTMapping) (kubectl.StatusViewer, error) - Builder func() *resource.Builder - DynamicClient dynamic.Interface + StatusViewerFn func(*meta.RESTMapping) (kubectl.StatusViewer, error) + Builder func() *resource.Builder + DynamicClient dynamic.Interface FilenameOptions *resource.FilenameOptions genericclioptions.IOStreams @@ -95,11 +95,11 @@ func NewCmdRolloutStatus(f cmdutil.Factory, streams genericclioptions.IOStreams) validArgs := []string{"deployment", "daemonset", "statefulset"} cmd := &cobra.Command{ - Use: "status (TYPE NAME | TYPE/NAME) [flags]", + Use: "status (TYPE NAME | TYPE/NAME) [flags]", DisableFlagsInUseLine: true, - Short: i18n.T("Show the status of the rollout"), - Long: status_long, - Example: status_example, + Short: i18n.T("Show the status of the rollout"), + Long: status_long, + Example: status_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, args)) cmdutil.CheckErr(o.Validate()) @@ -127,9 +127,7 @@ func (o *RolloutStatusOptions) Complete(f cmdutil.Factory, args []string) error } o.BuilderArgs = args - o.StatusViewer = func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { - return polymorphichelpers.StatusViewerFn(f, mapping) - } + o.StatusViewerFn = polymorphichelpers.StatusViewerFn clientConfig, err := f.ToRESTConfig() if err != nil { @@ -180,7 +178,7 @@ func (o *RolloutStatusOptions) Run() error { info := infos[0] mapping := info.ResourceMapping() - statusViewer, err := o.StatusViewer(mapping) + statusViewer, err := o.StatusViewerFn(mapping) if err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_undo.go 
b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_undo.go index b7e287fb4d751..2ab7a0288713e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_undo.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_undo.go @@ -25,11 +25,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // UndoOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of @@ -79,11 +78,11 @@ func NewCmdRolloutUndo(f cmdutil.Factory, streams genericclioptions.IOStreams) * validArgs := []string{"deployment", "daemonset", "statefulset"} cmd := &cobra.Command{ - Use: "undo (TYPE NAME | TYPE/NAME) [flags]", + Use: "undo (TYPE NAME | TYPE/NAME) [flags]", DisableFlagsInUseLine: true, - Short: i18n.T("Undo a previous rollout"), - Long: undo_long, - Example: undo_example, + Short: i18n.T("Undo a previous rollout"), + Long: undo_long, + Example: undo_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -132,7 +131,7 @@ func (o *UndoOptions) Validate() error { func (o *UndoOptions) RunUndo() error { r := o.Builder(). - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). NamespaceParam(o.Namespace).DefaultNamespace(). FilenameParam(o.EnforceNamespace, &o.FilenameOptions). ResourceTypeOrNameArgs(true, o.Resources...). 
@@ -163,7 +162,7 @@ func (o *UndoOptions) RunUndo() error { return err } - return printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out) + return printer.PrintObj(info.Object, o.Out) }) return err diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run/BUILD.bazel new file mode 100644 index 0000000000000..709b95945c93d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run/BUILD.bazel @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["run.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/run", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/docker/distribution/reference:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/tools/watch:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/attach:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/delete:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/exec:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/util/interrupt:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run/run.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run/run.go index 111ef6363daf7..fdd47b287a801 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run/run.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package run import ( "context" @@ -22,8 +22,8 @@ import ( "time" "github.com/docker/distribution/reference" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -38,13 +38,18 @@ import ( "k8s.io/client-go/kubernetes" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" watchtools "k8s.io/client-go/tools/watch" - "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/kubernetes/pkg/kubectl/cmd/attach" + "k8s.io/kubernetes/pkg/kubectl/cmd/delete" + "k8s.io/kubernetes/pkg/kubectl/cmd/exec" + "k8s.io/kubernetes/pkg/kubectl/cmd/logs" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/generate" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/kubernetes/pkg/util/interrupt" uexec "k8s.io/utils/exec" ) @@ -93,16 +98,23 @@ var ( kubectl run pi --schedule="0/5 * * * ?" --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'`)) ) +const ( + defaultPodAttachTimeout = 60 * time.Second +) + +var metadataAccessor = meta.NewAccessor() + type RunObject struct { Object runtime.Object Mapping *meta.RESTMapping } type RunOptions struct { - PrintFlags *genericclioptions.PrintFlags - DeleteFlags *DeleteFlags - DeleteOptions *DeleteOptions - RecordFlags *genericclioptions.RecordFlags + PrintFlags *genericclioptions.PrintFlags + RecordFlags *genericclioptions.RecordFlags + + DeleteFlags *delete.DeleteFlags + DeleteOptions *delete.DeleteOptions DryRun bool @@ -129,7 +141,7 @@ type RunOptions struct { func NewRunOptions(streams genericclioptions.IOStreams) *RunOptions { return &RunOptions{ PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), - DeleteFlags: NewDeleteFlags("to use to replace the resource."), + DeleteFlags: delete.NewDeleteFlags("to use to replace the resource."), RecordFlags: genericclioptions.NewRecordFlags(), Recorder: genericclioptions.NoopRecorder{}, @@ -142,11 +154,11 @@ func NewCmdRun(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Co o := NewRunOptions(streams) cmd := &cobra.Command{ - Use: "run NAME --image=image [--env=\"key=value\"] [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json] [--command] -- [COMMAND] [args...]", + Use: "run NAME --image=image [--env=\"key=value\"] [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json] [--command] -- [COMMAND] [args...]", DisableFlagsInUseLine: true, - Short: i18n.T("Run a particular image on the cluster"), - Long: runLong, - Example: runExample, + Short: i18n.T("Run a particular image on the cluster"), + Long: runLong, + Example: runExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd)) cmdutil.CheckErr(o.Run(f, cmd, args)) @@ -299,20 +311,20 @@ func (o *RunOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e generatorName := o.Generator if len(o.Schedule) != 0 && len(generatorName) == 0 { - generatorName = cmdutil.CronJobV1Beta1GeneratorName + generatorName = generateversioned.CronJobV1Beta1GeneratorName } if len(generatorName) == 0 { switch restartPolicy { case corev1.RestartPolicyAlways: - generatorName = 
cmdutil.DeploymentAppsV1Beta1GeneratorName + generatorName = generateversioned.DeploymentAppsV1GeneratorName case corev1.RestartPolicyOnFailure: - generatorName = cmdutil.JobV1GeneratorName + generatorName = generateversioned.JobV1GeneratorName case corev1.RestartPolicyNever: - generatorName = cmdutil.RunPodV1GeneratorName + generatorName = generateversioned.RunPodV1GeneratorName } // Falling back because the generator was not provided and the default one could be unavailable. - generatorNameTemp, err := cmdutil.FallbackGeneratorNameIfNecessary(generatorName, clientset.Discovery(), o.ErrOut) + generatorNameTemp, err := generateversioned.FallbackGeneratorNameIfNecessary(generatorName, clientset.Discovery(), o.ErrOut) if err != nil { return err } @@ -326,17 +338,17 @@ func (o *RunOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e // start deprecating all generators except for 'run-pod/v1' which will be // the only supported on a route to simple kubectl run which should mimic // docker run - if generatorName != cmdutil.RunPodV1GeneratorName { - fmt.Fprintf(o.ErrOut, "kubectl run --generator=%s is DEPRECATED and will be removed in a future version. Use kubectl create instead.\n", generatorName) + if generatorName != generateversioned.RunPodV1GeneratorName { + fmt.Fprintf(o.ErrOut, "kubectl run --generator=%s is DEPRECATED and will be removed in a future version. Use kubectl run --generator=%s or kubectl create instead.\n", generatorName, generateversioned.RunPodV1GeneratorName) } - generators := cmdutil.GeneratorFn("run") + generators := generateversioned.GeneratorFn("run") generator, found := generators[generatorName] if !found { return cmdutil.UsageErrorf(cmd, "generator %q not found", generatorName) } names := generator.ParamNames() - params := kubectl.MakeParams(cmd, names) + params := generate.MakeParams(cmd, names) params["name"] = args[0] if len(args) > 1 { params["args"] = args[1:] @@ -370,8 +382,8 @@ func (o *RunOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e defer o.removeCreatedObjects(f, createdObjects) } - opts := &AttachOptions{ - StreamOptions: StreamOptions{ + opts := &attach.AttachOptions{ + StreamOptions: exec.StreamOptions{ IOStreams: o.IOStreams, Stdin: o.Interactive, TTY: o.TTY, @@ -380,14 +392,14 @@ func (o *RunOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e GetPodTimeout: timeout, CommandName: cmd.Parent().CommandPath() + " attach", - Attach: &DefaultRemoteAttach{}, + Attach: &attach.DefaultRemoteAttach{}, } config, err := f.ToRESTConfig() if err != nil { return err } opts.Config = config - opts.AttachFunc = defaultAttachFunc + opts.AttachFunc = attach.DefaultAttachFunc clientset, err := kubernetes.NewForConfig(config) if err != nil { @@ -461,7 +473,7 @@ func (o *RunOptions) removeCreatedObjects(f cmdutil.Factory, createdObjects []*R return err } r := f.NewBuilder(). - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). ContinueOnError(). NamespaceParam(namespace).DefaultNamespace(). ResourceNames(obj.Mapping.Resource.Resource+"."+obj.Mapping.Resource.Group, name). 
@@ -505,7 +517,7 @@ func waitForPod(podClient corev1client.PodsGetter, ns, name string, exitConditio return result, err } -func handleAttachPod(f cmdutil.Factory, podClient corev1client.PodsGetter, ns, name string, opts *AttachOptions) error { +func handleAttachPod(f cmdutil.Factory, podClient corev1client.PodsGetter, ns, name string, opts *attach.AttachOptions) error { pod, err := waitForPod(podClient, ns, name, kubectl.PodRunningAndReady) if err != nil && err != kubectl.ErrPodCompleted { return err @@ -520,7 +532,7 @@ func handleAttachPod(f cmdutil.Factory, podClient corev1client.PodsGetter, ns, n opts.Namespace = ns if opts.AttachFunc == nil { - opts.AttachFunc = defaultAttachFunc + opts.AttachFunc = attach.DefaultAttachFunc } if err := opts.Run(); err != nil { @@ -531,7 +543,7 @@ func handleAttachPod(f cmdutil.Factory, podClient corev1client.PodsGetter, ns, n } // logOpts logs output from opts to the pods log. -func logOpts(restClientGetter genericclioptions.RESTClientGetter, pod *corev1.Pod, opts *AttachOptions) error { +func logOpts(restClientGetter genericclioptions.RESTClientGetter, pod *corev1.Pod, opts *attach.AttachOptions) error { ctrName, err := opts.GetContainerName(pod) if err != nil { return err @@ -542,7 +554,7 @@ func logOpts(restClientGetter genericclioptions.RESTClientGetter, pod *corev1.Po return err } for _, request := range requests { - if err := DefaultConsumeRequest(request, opts.Out); err != nil { + if err := logs.DefaultConsumeRequest(request, opts.Out); err != nil { return err } } @@ -582,7 +594,7 @@ func verifyImagePullPolicy(cmd *cobra.Command) error { } func (o *RunOptions) generateService(f cmdutil.Factory, cmd *cobra.Command, serviceGenerator string, paramsIn map[string]interface{}, namespace string) (*RunObject, error) { - generators := cmdutil.GeneratorFn("expose") + generators := generateversioned.GeneratorFn("expose") generator, found := generators[serviceGenerator] if !found { return nil, fmt.Errorf("missing service generator: %s", serviceGenerator) @@ -627,8 +639,8 @@ func (o *RunOptions) generateService(f cmdutil.Factory, cmd *cobra.Command, serv return runObject, nil } -func (o *RunOptions) createGeneratedObject(f cmdutil.Factory, cmd *cobra.Command, generator kubectl.Generator, names []kubectl.GeneratorParam, params map[string]interface{}, overrides, namespace string) (*RunObject, error) { - err := kubectl.ValidateParams(names, params) +func (o *RunOptions) createGeneratedObject(f cmdutil.Factory, cmd *cobra.Command, generator generate.Generator, names []generate.GeneratorParam, params map[string]interface{}, overrides, namespace string) (*RunObject, error) { + err := generate.ValidateParams(names, params) if err != nil { return nil, err } @@ -662,7 +674,7 @@ func (o *RunOptions) createGeneratedObject(f cmdutil.Factory, cmd *cobra.Command } if err := o.Recorder.Record(obj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } actualObj := obj @@ -679,7 +691,6 @@ func (o *RunOptions) createGeneratedObject(f cmdutil.Factory, cmd *cobra.Command return nil, err } } - actualObj = cmdutil.AsDefaultVersionedOrOriginal(actualObj, mapping) return &RunObject{ Object: actualObj, diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/BUILD.bazel new file mode 100644 index 0000000000000..e0c3b42748c6f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/BUILD.bazel @@ -0,0 +1,32 @@ 
+load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "scale.go", + "scalejob.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/scale", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/batch/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/scale.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/scale.go index 932ae8702b664..997d37cefbb9e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/scale.go @@ -14,14 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package scale import ( "fmt" "time" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" @@ -32,10 +32,9 @@ import ( "k8s.io/client-go/kubernetes" batchclient "k8s.io/client-go/kubernetes/typed/batch/v1" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/scalejob" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -65,6 +64,10 @@ var ( kubectl scale --replicas=3 statefulset/web`)) ) +const ( + timeout = 5 * time.Minute +) + type ScaleOptions struct { FilenameOptions resource.FilenameOptions RecordFlags *genericclioptions.RecordFlags @@ -109,11 +112,11 @@ func NewCmdScale(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr validArgs := []string{"deployment", "replicaset", "replicationcontroller", "statefulset"} cmd := &cobra.Command{ - Use: "scale [--resource-version=version] [--current-replicas=count] --replicas=COUNT (-f FILENAME | TYPE NAME)", + Use: "scale [--resource-version=version] [--current-replicas=count] --replicas=COUNT (-f FILENAME | TYPE NAME)", DisableFlagsInUseLine: true, - Short: i18n.T("Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job"), - Long: scaleLong, - Example: scaleExample, + Short: i18n.T("Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job"), + Long: scaleLong, + Example: scaleExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate(cmd)) @@ -237,7 +240,7 @@ func (o *ScaleOptions) RunScale() error { // if the recorder makes a change, compute and create another patch if mergePatch, err := o.Recorder.MakeRecordMergePatch(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } else if len(mergePatch) > 0 { client, err := o.unstructuredClientForMapping(mapping) if err != nil { @@ -245,7 +248,7 @@ func (o *ScaleOptions) RunScale() error { } helper := resource.NewHelper(client, mapping) if _, err := helper.Patch(info.Namespace, info.Name, types.MergePatchType, mergePatch, nil); err != nil { - glog.V(4).Infof("error recording reason: %v", err) + klog.V(4).Infof("error recording reason: %v", err) } } @@ -262,20 +265,20 @@ func (o *ScaleOptions) RunScale() error { } func ScaleJob(info *resource.Info, jobsClient batchclient.JobsGetter, count uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error { - scaler := scalejob.JobPsuedoScaler{ + scaler := JobPsuedoScaler{ JobsClient: jobsClient, } - var jobPreconditions *scalejob.ScalePrecondition + var jobPreconditions *ScalePrecondition if preconditions != nil { - jobPreconditions = &scalejob.ScalePrecondition{Size: preconditions.Size, ResourceVersion: preconditions.ResourceVersion} + jobPreconditions = &ScalePrecondition{Size: preconditions.Size, ResourceVersion: preconditions.ResourceVersion} } - var jobRetry *scalejob.RetryParams + var jobRetry *RetryParams if retry != nil { - jobRetry = &scalejob.RetryParams{Interval: retry.Interval, Timeout: retry.Timeout} + jobRetry = &RetryParams{Interval: retry.Interval, Timeout: retry.Timeout} } - var jobWaitForReplicas *scalejob.RetryParams + var jobWaitForReplicas *RetryParams if waitForReplicas != nil { - jobWaitForReplicas = 
&scalejob.RetryParams{Interval: waitForReplicas.Interval, Timeout: waitForReplicas.Timeout} + jobWaitForReplicas = &RetryParams{Interval: waitForReplicas.Interval, Timeout: waitForReplicas.Timeout} } return scaler.Scale(info.Namespace, info.Name, count, jobPreconditions, jobRetry, jobWaitForReplicas) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/scalejob.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/scalejob.go index 5324c33219be9..590bc4d05eb71 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/scalejob.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package scalejob +package scale import ( "fmt" diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/BUILD.bazel deleted file mode 100644 index 07189c0eb0a20..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "scalejob.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob", - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/scalejob", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/BUILD.bazel index a85febd7bafbf..e30c70ed7a2a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/BUILD.bazel @@ -16,7 +16,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/set", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", @@ -33,12 +32,13 @@ go_library( "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/env:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", ], ) diff --git 
a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/env/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/env/BUILD.bazel index 661f964f2580d..e0ea571bbc113 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/env/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/env/BUILD.bazel @@ -12,11 +12,12 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/v1/resource:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/fieldpath:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/env/env_resolve.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/env/env_resolve.go index ede70215ad42b..2328ef7d3bd02 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/env/env_resolve.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/env/env_resolve.go @@ -18,31 +18,36 @@ package env import ( "fmt" + "math" + "strconv" + "strings" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api/v1/resource" - "k8s.io/kubernetes/pkg/fieldpath" ) // ResourceStore defines a new resource store data structure. type ResourceStore struct { - SecretStore map[string]*v1.Secret - ConfigMapStore map[string]*v1.ConfigMap + SecretStore map[string]*corev1.Secret + ConfigMapStore map[string]*corev1.ConfigMap } // NewResourceStore returns a pointer to a new resource store data structure. 
func NewResourceStore() *ResourceStore { return &ResourceStore{ - SecretStore: make(map[string]*v1.Secret), - ConfigMapStore: make(map[string]*v1.ConfigMap), + SecretStore: make(map[string]*corev1.Secret), + ConfigMapStore: make(map[string]*corev1.ConfigMap), } } // getSecretRefValue returns the value of a secret in the supplied namespace -func getSecretRefValue(client kubernetes.Interface, namespace string, store *ResourceStore, secretSelector *v1.SecretKeySelector) (string, error) { +func getSecretRefValue(client kubernetes.Interface, namespace string, store *ResourceStore, secretSelector *corev1.SecretKeySelector) (string, error) { secret, ok := store.SecretStore[secretSelector.Name] if !ok { var err error @@ -60,7 +65,7 @@ func getSecretRefValue(client kubernetes.Interface, namespace string, store *Res } // getConfigMapRefValue returns the value of a configmap in the supplied namespace -func getConfigMapRefValue(client kubernetes.Interface, namespace string, store *ResourceStore, configMapSelector *v1.ConfigMapKeySelector) (string, error) { +func getConfigMapRefValue(client kubernetes.Interface, namespace string, store *ResourceStore, configMapSelector *corev1.ConfigMapKeySelector) (string, error) { configMap, ok := store.ConfigMapStore[configMapSelector.Name] if !ok { var err error @@ -77,17 +82,149 @@ func getConfigMapRefValue(client kubernetes.Interface, namespace string, store * } // getFieldRef returns the value of the supplied path in the given object -func getFieldRef(obj runtime.Object, from *v1.EnvVarSource) (string, error) { - return fieldpath.ExtractFieldPathAsString(obj, from.FieldRef.FieldPath) +func getFieldRef(obj runtime.Object, from *corev1.EnvVarSource) (string, error) { + return extractFieldPathAsString(obj, from.FieldRef.FieldPath) +} + +// extractFieldPathAsString extracts the field from the given object +// and returns it as a string. The object must be a pointer to an +// API type. +func extractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", nil + } + + if path, subscript, ok := splitMaybeSubscriptedPath(fieldPath); ok { + switch path { + case "metadata.annotations": + if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetAnnotations()[subscript], nil + case "metadata.labels": + if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetLabels()[subscript], nil + default: + return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) + } + } + + switch fieldPath { + case "metadata.annotations": + return formatMap(accessor.GetAnnotations()), nil + case "metadata.labels": + return formatMap(accessor.GetLabels()), nil + case "metadata.name": + return accessor.GetName(), nil + case "metadata.namespace": + return accessor.GetNamespace(), nil + case "metadata.uid": + return string(accessor.GetUID()), nil + } + + return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) +} + +// splitMaybeSubscriptedPath checks whether the specified fieldPath is +// subscripted, and +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). 
+// +// Example inputs and outputs: +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels['']" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) +func splitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} + +// formatMap formats map[string]string to a string. +func formatMap(m map[string]string) (fmtStr string) { + // output with keys in sorted order to provide stable output + keys := sets.NewString() + for key := range m { + keys.Insert(key) + } + for _, key := range keys.List() { + fmtStr += fmt.Sprintf("%v=%q\n", key, m[key]) + } + fmtStr = strings.TrimSuffix(fmtStr, "\n") + + return } // getResourceFieldRef returns the value of a resource in the given container -func getResourceFieldRef(from *v1.EnvVarSource, c *v1.Container) (string, error) { - return resource.ExtractContainerResourceValue(from.ResourceFieldRef, c) +func getResourceFieldRef(from *corev1.EnvVarSource, container *corev1.Container) (string, error) { + return extractContainerResourceValue(from.ResourceFieldRef, container) +} + +// ExtractContainerResourceValue extracts the value of a resource +// in an already known container +func extractContainerResourceValue(fs *corev1.ResourceFieldSelector, container *corev1.Container) (string, error) { + divisor := resource.Quantity{} + if divisor.Cmp(fs.Divisor) == 0 { + divisor = resource.MustParse("1") + } else { + divisor = fs.Divisor + } + + switch fs.Resource { + case "limits.cpu": + return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) + case "limits.memory": + return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) + case "limits.ephemeral-storage": + return convertResourceEphemeralStorageToString(container.Resources.Limits.StorageEphemeral(), divisor) + case "requests.cpu": + return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) + case "requests.memory": + return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) + case "requests.ephemeral-storage": + return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor) + } + + return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) +} + +// convertResourceCPUToString converts cpu value to the format of divisor and returns +// ceiling of the value. +func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { + c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) + return strconv.FormatInt(c, 10), nil +} + +// convertResourceMemoryToString converts memory value to the format of divisor and returns +// ceiling of the value. +func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} + +// convertResourceEphemeralStorageToString converts ephemeral storage value to the format of divisor and returns +// ceiling of the value. 
+func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil } // GetEnvVarRefValue returns the value referenced by the supplied EnvVarSource given the other supplied information. -func GetEnvVarRefValue(kc kubernetes.Interface, ns string, store *ResourceStore, from *v1.EnvVarSource, obj runtime.Object, c *v1.Container) (string, error) { +func GetEnvVarRefValue(kc kubernetes.Interface, ns string, store *ResourceStore, from *corev1.EnvVarSource, obj runtime.Object, c *corev1.Container) (string, error) { if from.SecretKeyRef != nil { return getSecretRefValue(kc, ns, store, from.SecretKeyRef) } @@ -108,7 +245,7 @@ func GetEnvVarRefValue(kc kubernetes.Interface, ns string, store *ResourceStore, } // GetEnvVarRefString returns a text description of whichever field is set within the supplied EnvVarSource argument. -func GetEnvVarRefString(from *v1.EnvVarSource) string { +func GetEnvVarRefString(from *corev1.EnvVarSource) string { if from.ConfigMapKeyRef != nil { return fmt.Sprintf("configmap %s, key %s", from.ConfigMapKeyRef.Name, from.ConfigMapKeyRef.Key) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set.go index d45ce33e2ab6e..a50b7f953746d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set.go @@ -19,9 +19,9 @@ package set import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -33,11 +33,11 @@ var ( func NewCmdSet(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { cmd := &cobra.Command{ - Use: "set SUBCOMMAND", + Use: "set SUBCOMMAND", DisableFlagsInUseLine: true, - Short: i18n.T("Set specific features on objects"), - Long: set_long, - Run: cmdutil.DefaultSubCommandRun(streams.ErrOut), + Short: i18n.T("Set specific features on objects"), + Long: set_long, + Run: cmdutil.DefaultSubCommandRun(streams.ErrOut), } // add subcommands diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_env.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_env.go index 0a315607088f8..62aed433dfe69 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_env.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_env.go @@ -35,10 +35,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/kubernetes" envutil "k8s.io/kubernetes/pkg/kubectl/cmd/set/env" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -145,11 +145,11 @@ func NewEnvOptions(streams genericclioptions.IOStreams) *EnvOptions { func NewCmdEnv(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewEnvOptions(streams) cmd := &cobra.Command{ - Use: "env RESOURCE/NAME KEY_1=VAL_1 ... KEY_N=VAL_N", + Use: "env RESOURCE/NAME KEY_1=VAL_1 ... 
KEY_N=VAL_N", DisableFlagsInUseLine: true, - Short: "Update environment variables on a pod template", - Long: envLong, - Example: fmt.Sprintf(envExample), + Short: "Update environment variables on a pod template", + Long: envLong, + Example: fmt.Sprintf(envExample), Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_image.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_image.go index c97102e31cecb..38677a803f086 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_image.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_image.go @@ -19,8 +19,8 @@ package set import ( "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -29,11 +29,11 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // ImageOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of @@ -101,11 +101,11 @@ func NewCmdImage(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra. o := NewImageOptions(streams) cmd := &cobra.Command{ - Use: "image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... CONTAINER_NAME_N=CONTAINER_IMAGE_N", + Use: "image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... 
CONTAINER_NAME_N=CONTAINER_IMAGE_N", DisableFlagsInUseLine: true, - Short: i18n.T("Update image of a pod template"), - Long: image_long, - Example: image_example, + Short: i18n.T("Update image of a pod template"), + Long: image_long, + Example: image_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -256,7 +256,7 @@ func (o *SetImageOptions) Run() error { } // record this change (for rollout history) if err := o.Recorder.Record(obj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } return runtime.Encode(scheme.DefaultJSONEncoder(), obj) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_resources.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_resources.go index d2010a79dd74e..b7328a0cb4578 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_resources.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_resources.go @@ -19,8 +19,8 @@ package set import ( "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -29,12 +29,12 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + generateversioned "k8s.io/kubernetes/pkg/kubectl/generate/versioned" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -108,11 +108,11 @@ func NewCmdResources(f cmdutil.Factory, streams genericclioptions.IOStreams) *co o := NewResourcesOptions(streams) cmd := &cobra.Command{ - Use: "resources (-f FILENAME | TYPE NAME) ([--limits=LIMITS & --requests=REQUESTS]", + Use: "resources (-f FILENAME | TYPE NAME) ([--limits=LIMITS & --requests=REQUESTS]", DisableFlagsInUseLine: true, - Short: i18n.T("Update resource requests/limits on objects with pod templates"), - Long: fmt.Sprintf(resources_long, cmdutil.SuggestApiResources("kubectl")), - Example: resources_example, + Short: i18n.T("Update resource requests/limits on objects with pod templates"), + Long: fmt.Sprintf(resources_long, cmdutil.SuggestApiResources("kubectl")), + Example: resources_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -206,7 +206,7 @@ func (o *SetResourcesOptions) Validate() error { return fmt.Errorf("you must specify an update to requests or limits (in the form of --requests/--limits)") } - o.ResourceRequirements, err = kubectl.HandleResourceRequirementsV1(map[string]string{"limits": o.Limits, "requests": o.Requests}) + o.ResourceRequirements, err = generateversioned.HandleResourceRequirementsV1(map[string]string{"limits": o.Limits, "requests": o.Requests}) if err != nil { return err } @@ -250,7 +250,7 @@ func (o *SetResourcesOptions) Run() error { } // record this change (for rollout history) if err := o.Recorder.Record(obj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } return runtime.Encode(scheme.DefaultJSONEncoder(), obj) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_selector.go 
b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_selector.go index 8593f92fbd34c..167ae74717038 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_selector.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_selector.go @@ -19,8 +19,8 @@ package set import ( "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,10 +30,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // SelectorOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of @@ -95,11 +95,11 @@ func NewCmdSelector(f cmdutil.Factory, streams genericclioptions.IOStreams) *cob o := NewSelectorOptions(streams) cmd := &cobra.Command{ - Use: "selector (-f FILENAME | TYPE NAME) EXPRESSIONS [--resource-version=version]", + Use: "selector (-f FILENAME | TYPE NAME) EXPRESSIONS [--resource-version=version]", DisableFlagsInUseLine: true, - Short: i18n.T("Set the selector on a resource"), - Long: fmt.Sprintf(selectorLong, validation.LabelValueMaxLength), - Example: selectorExample, + Short: i18n.T("Set the selector on a resource"), + Long: fmt.Sprintf(selectorLong, validation.LabelValueMaxLength), + Example: selectorExample, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) @@ -171,7 +171,7 @@ func (o *SetSelectorOptions) RunSelector() error { // record this change (for rollout history) if err := o.Recorder.Record(patch.Info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } return runtime.Encode(scheme.DefaultJSONEncoder(), info.Object) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_serviceaccount.go index 2a41b12780aa2..eced76f7e9c3c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_serviceaccount.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_serviceaccount.go @@ -20,8 +20,8 @@ import ( "errors" "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -30,11 +30,11 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -93,7 +93,7 @@ func NewCmdServiceAccount(f cmdutil.Factory, streams genericclioptions.IOStreams o := NewSetServiceAccountOptions(streams) cmd := &cobra.Command{ - Use: "serviceaccount (-f FILENAME | TYPE NAME) SERVICE_ACCOUNT", + Use: "serviceaccount (-f FILENAME | TYPE NAME) SERVICE_ACCOUNT", DisableFlagsInUseLine: true, Aliases: []string{"sa"}, Short: i18n.T("Update ServiceAccount of a resource"), @@ -183,7 +183,7 @@ func (o *SetServiceAccountOptions) Run() 
error { } // record this change (for rollout history) if err := o.Recorder.Record(obj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } return runtime.Encode(scheme.DefaultJSONEncoder(), obj) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_subject.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_subject.go index 7e0e896be2f2f..a4a245e28558f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_subject.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_subject.go @@ -30,10 +30,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -90,11 +90,11 @@ func NewSubjectOptions(streams genericclioptions.IOStreams) *SubjectOptions { func NewCmdSubject(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewSubjectOptions(streams) cmd := &cobra.Command{ - Use: "subject (-f FILENAME | TYPE NAME) [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", + Use: "subject (-f FILENAME | TYPE NAME) [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run]", DisableFlagsInUseLine: true, - Short: i18n.T("Update User, Group or ServiceAccount in a RoleBinding/ClusterRoleBinding"), - Long: subject_long, - Example: subject_example, + Short: i18n.T("Update User, Group or ServiceAccount in a RoleBinding/ClusterRoleBinding"), + Long: subject_long, + Example: subject_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint/BUILD.bazel new file mode 100644 index 0000000000000..fc95b6e213155 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "taint.go", + "utils.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/taint", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + 
"//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint/taint.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint/taint.go index cb43d035679fd..02f5651307983 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint/taint.go @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package taint import ( "encoding/json" "fmt" "strings" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -33,11 +33,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - taintutils "k8s.io/kubernetes/pkg/util/taints" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // TaintOptions have the data required to perform the taint operation @@ -93,21 +92,15 @@ func NewCmdTaint(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra. validArgs := []string{"node"} cmd := &cobra.Command{ - Use: "taint NODE NAME KEY_1=VAL_1:TAINT_EFFECT_1 ... KEY_N=VAL_N:TAINT_EFFECT_N", + Use: "taint NODE NAME KEY_1=VAL_1:TAINT_EFFECT_1 ... KEY_N=VAL_N:TAINT_EFFECT_N", DisableFlagsInUseLine: true, - Short: i18n.T("Update the taints on one or more nodes"), - Long: fmt.Sprintf(taintLong, validation.DNS1123SubdomainMaxLength, validation.LabelValueMaxLength), - Example: taintExample, + Short: i18n.T("Update the taints on one or more nodes"), + Long: fmt.Sprintf(taintLong, validation.DNS1123SubdomainMaxLength, validation.LabelValueMaxLength), + Example: taintExample, Run: func(cmd *cobra.Command, args []string) { - if err := options.Complete(f, cmd, args); err != nil { - cmdutil.CheckErr(err) - } - if err := options.Validate(); err != nil { - cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, err.Error())) - } - if err := options.RunTaint(); err != nil { - cmdutil.CheckErr(err) - } + cmdutil.CheckErr(options.Complete(f, cmd, args)) + cmdutil.CheckErr(options.Validate()) + cmdutil.CheckErr(options.RunTaint()) }, ValidArgs: validArgs, } @@ -159,7 +152,7 @@ func (o *TaintOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st return fmt.Errorf("at least one taint update is required") } - if o.taintsToAdd, o.taintsToRemove, err = taintutils.ParseTaints(taintArgs); err != nil { + if o.taintsToAdd, o.taintsToRemove, err = parseTaints(taintArgs); err != nil { return cmdutil.UsageErrorf(cmd, err.Error()) } o.builder = f.NewBuilder(). 
@@ -262,7 +255,7 @@ func (o TaintOptions) RunTaint() error { patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj) createdPatch := err == nil if err != nil { - glog.V(2).Infof("couldn't compute patch: %v", err) + klog.V(2).Infof("couldn't compute patch: %v", err) } mapping := info.ResourceMapping() @@ -281,7 +274,6 @@ func (o TaintOptions) RunTaint() error { if err != nil { return err } - outputObj = cmdutil.AsDefaultVersionedOrOriginal(outputObj, mapping) printer, err := o.ToPrinter(operation) if err != nil { @@ -298,11 +290,11 @@ func (o TaintOptions) updateTaints(obj runtime.Object) (string, error) { return "", fmt.Errorf("unexpected type %T, expected Node", obj) } if !o.overwrite { - if exists := taintutils.CheckIfTaintsAlreadyExists(node.Spec.Taints, o.taintsToAdd); len(exists) != 0 { + if exists := checkIfTaintsAlreadyExists(node.Spec.Taints, o.taintsToAdd); len(exists) != 0 { return "", fmt.Errorf("Node %s already has %v taint(s) with same effect(s) and --overwrite is false", node.Name, exists) } } - operation, newTaints, err := taintutils.ReorganizeTaints(node, o.overwrite, o.taintsToAdd, o.taintsToRemove) + operation, newTaints, err := reorganizeTaints(node, o.overwrite, o.taintsToAdd, o.taintsToRemove) if err != nil { return "", err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint/utils.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint/utils.go new file mode 100644 index 0000000000000..46245fb24ecf6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/taint/utils.go @@ -0,0 +1,209 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package taints implements utilites for working with taints +package taint + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" +) + +// Exported taint constant strings +const ( + MODIFIED = "modified" + TAINTED = "tainted" + UNTAINTED = "untainted" +) + +// parseTaints takes a spec which is an array and creates slices for new taints to be added, taints to be deleted. 
+func parseTaints(spec []string) ([]corev1.Taint, []corev1.Taint, error) { + var taints, taintsToRemove []corev1.Taint + uniqueTaints := map[corev1.TaintEffect]sets.String{} + + for _, taintSpec := range spec { + if strings.Index(taintSpec, "=") != -1 && strings.Index(taintSpec, ":") != -1 { + newTaint, err := parseTaint(taintSpec) + if err != nil { + return nil, nil, err + } + // validate if taint is unique by + if len(uniqueTaints[newTaint.Effect]) > 0 && uniqueTaints[newTaint.Effect].Has(newTaint.Key) { + return nil, nil, fmt.Errorf("duplicated taints with the same key and effect: %v", newTaint) + } + // add taint to existingTaints for uniqueness check + if len(uniqueTaints[newTaint.Effect]) == 0 { + uniqueTaints[newTaint.Effect] = sets.String{} + } + uniqueTaints[newTaint.Effect].Insert(newTaint.Key) + + taints = append(taints, newTaint) + } else if strings.HasSuffix(taintSpec, "-") { + taintKey := taintSpec[:len(taintSpec)-1] + var effect corev1.TaintEffect + if strings.Index(taintKey, ":") != -1 { + parts := strings.Split(taintKey, ":") + taintKey = parts[0] + effect = corev1.TaintEffect(parts[1]) + } + + // If effect is specified, need to validate it. + if len(effect) > 0 { + err := validateTaintEffect(effect) + if err != nil { + return nil, nil, err + } + } + taintsToRemove = append(taintsToRemove, corev1.Taint{Key: taintKey, Effect: effect}) + } else { + return nil, nil, fmt.Errorf("unknown taint spec: %v", taintSpec) + } + } + return taints, taintsToRemove, nil +} + +// parseTaint parses a taint from a string. Taint must be of the format '=:'. +func parseTaint(st string) (corev1.Taint, error) { + var taint corev1.Taint + parts := strings.Split(st, "=") + if len(parts) != 2 || len(parts[1]) == 0 || len(validation.IsQualifiedName(parts[0])) > 0 { + return taint, fmt.Errorf("invalid taint spec: %v", st) + } + + parts2 := strings.Split(parts[1], ":") + + errs := validation.IsValidLabelValue(parts2[0]) + if len(parts2) != 2 || len(errs) != 0 { + return taint, fmt.Errorf("invalid taint spec: %v, %s", st, strings.Join(errs, "; ")) + } + + effect := corev1.TaintEffect(parts2[1]) + if err := validateTaintEffect(effect); err != nil { + return taint, err + } + + taint.Key = parts[0] + taint.Value = parts2[0] + taint.Effect = effect + + return taint, nil +} + +func validateTaintEffect(effect corev1.TaintEffect) error { + if effect != corev1.TaintEffectNoSchedule && effect != corev1.TaintEffectPreferNoSchedule && effect != corev1.TaintEffectNoExecute { + return fmt.Errorf("invalid taint effect: %v, unsupported taint effect", effect) + } + + return nil +} + +// ReorganizeTaints returns the updated set of taints, taking into account old taints that were not updated, +// old taints that were updated, old taints that were deleted, and new taints. +func reorganizeTaints(node *corev1.Node, overwrite bool, taintsToAdd []corev1.Taint, taintsToRemove []corev1.Taint) (string, []corev1.Taint, error) { + newTaints := append([]corev1.Taint{}, taintsToAdd...) + oldTaints := node.Spec.Taints + // add taints that already existing but not updated to newTaints + added := addTaints(oldTaints, &newTaints) + allErrs, deleted := deleteTaints(taintsToRemove, &newTaints) + if (added && deleted) || overwrite { + return MODIFIED, newTaints, utilerrors.NewAggregate(allErrs) + } else if added { + return TAINTED, newTaints, utilerrors.NewAggregate(allErrs) + } + return UNTAINTED, newTaints, utilerrors.NewAggregate(allErrs) +} + +// deleteTaints deletes the given taints from the node's taintlist. 
+func deleteTaints(taintsToRemove []corev1.Taint, newTaints *[]corev1.Taint) ([]error, bool) { + allErrs := []error{} + var removed bool + for _, taintToRemove := range taintsToRemove { + removed = false + if len(taintToRemove.Effect) > 0 { + *newTaints, removed = deleteTaint(*newTaints, &taintToRemove) + } else { + *newTaints, removed = deleteTaintsByKey(*newTaints, taintToRemove.Key) + } + if !removed { + allErrs = append(allErrs, fmt.Errorf("taint %q not found", taintToRemove.ToString())) + } + } + return allErrs, removed +} + +// addTaints adds the newTaints list to existing ones and updates the newTaints List. +// TODO: This needs a rewrite to take only the new values instead of appended newTaints list to be consistent. +func addTaints(oldTaints []corev1.Taint, newTaints *[]corev1.Taint) bool { + for _, oldTaint := range oldTaints { + existsInNew := false + for _, taint := range *newTaints { + if taint.MatchTaint(&oldTaint) { + existsInNew = true + break + } + } + if !existsInNew { + *newTaints = append(*newTaints, oldTaint) + } + } + return len(oldTaints) != len(*newTaints) +} + +// CheckIfTaintsAlreadyExists checks if the node already has taints that we want to add and returns a string with taint keys. +func checkIfTaintsAlreadyExists(oldTaints []corev1.Taint, taints []corev1.Taint) string { + var existingTaintList = make([]string, 0) + for _, taint := range taints { + for _, oldTaint := range oldTaints { + if taint.Key == oldTaint.Key && taint.Effect == oldTaint.Effect { + existingTaintList = append(existingTaintList, taint.Key) + } + } + } + return strings.Join(existingTaintList, ",") +} + +// DeleteTaintsByKey removes all the taints that have the same key to given taintKey +func deleteTaintsByKey(taints []corev1.Taint, taintKey string) ([]corev1.Taint, bool) { + newTaints := []corev1.Taint{} + deleted := false + for i := range taints { + if taintKey == taints[i].Key { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} + +// DeleteTaint removes all the taints that have the same key and effect to given taintToDelete. 
+func deleteTaint(taints []corev1.Taint, taintToDelete *corev1.Taint) ([]corev1.Taint, bool) { + newTaints := []corev1.Taint{} + deleted := false + for i := range taints { + if taintToDelete.MatchTaint(&taints[i]) { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/BUILD.bazel new file mode 100644 index 0000000000000..ace286a631745 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "top.go", + "top_node.go", + "top_pod.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/top", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + "//vendor/k8s.io/metrics/pkg/apis/metrics:go_default_library", + "//vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library", + "//vendor/k8s.io/metrics/pkg/client/clientset/versioned:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top.go index 79608aa677769..27e6e725ae415 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top.go @@ -14,17 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package cmd +package top import ( + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/genericclioptions" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" metricsapi "k8s.io/metrics/pkg/apis/metrics" - - "github.com/spf13/cobra" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" ) var ( diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top_node.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_node.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top_node.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_node.go index c224d3f36e61c..48e7f48e0c74a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top_node.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_node.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package top import ( "errors" @@ -27,10 +27,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/discovery" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/metricsutil" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" metricsapi "k8s.io/metrics/pkg/apis/metrics" metricsV1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1" metricsclientset "k8s.io/metrics/pkg/client/clientset/versioned" @@ -40,6 +40,7 @@ import ( type TopNodeOptions struct { ResourceName string Selector string + NoHeaders bool NodeClient corev1client.CoreV1Interface HeapsterOptions HeapsterTopOptions Client *metricsutil.HeapsterMetricsClient @@ -99,25 +100,21 @@ func NewCmdTopNode(f cmdutil.Factory, o *TopNodeOptions, streams genericclioptio } cmd := &cobra.Command{ - Use: "node [NAME | -l label]", + Use: "node [NAME | -l label]", DisableFlagsInUseLine: true, - Short: i18n.T("Display Resource (CPU/Memory/Storage) usage of nodes"), - Long: topNodeLong, - Example: topNodeExample, + Short: i18n.T("Display Resource (CPU/Memory/Storage) usage of nodes"), + Long: topNodeLong, + Example: topNodeExample, Run: func(cmd *cobra.Command, args []string) { - if err := o.Complete(f, cmd, args); err != nil { - cmdutil.CheckErr(err) - } - if err := o.Validate(); err != nil { - cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, "%v", err)) - } - if err := o.RunTopNode(); err != nil { - cmdutil.CheckErr(err) - } + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunTopNode()) }, Aliases: []string{"nodes", "no"}, } cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. 
-l key1=value1,key2=value2)") + cmd.Flags().BoolVar(&o.NoHeaders, "no-headers", o.NoHeaders, "If present, print output without headers") + o.HeapsterOptions.Bind(cmd.Flags()) return cmd } @@ -216,7 +213,7 @@ func (o TopNodeOptions) RunTopNode() error { allocatable[n.Name] = n.Status.Allocatable } - return o.Printer.PrintNodeMetrics(metrics.Items, allocatable) + return o.Printer.PrintNodeMetrics(metrics.Items, allocatable, o.NoHeaders) } func getNodeMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, resourceName string, selector labels.Selector) (*metricsapi.NodeMetricsList, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top_pod.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_pod.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top_pod.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_pod.go index e3a2c35e51700..5c9a3712761a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top_pod.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_pod.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package top import ( "errors" @@ -26,17 +26,17 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/discovery" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/metricsutil" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" metricsapi "k8s.io/metrics/pkg/apis/metrics" metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1" metricsclientset "k8s.io/metrics/pkg/client/clientset/versioned" - "github.com/golang/glog" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/klog" ) type TopPodOptions struct { @@ -45,6 +45,7 @@ type TopPodOptions struct { Selector string AllNamespaces bool PrintContainers bool + NoHeaders bool PodClient corev1client.PodsGetter HeapsterOptions HeapsterTopOptions Client *metricsutil.HeapsterMetricsClient @@ -88,27 +89,22 @@ func NewCmdTopPod(f cmdutil.Factory, o *TopPodOptions, streams genericclioptions } cmd := &cobra.Command{ - Use: "pod [NAME | -l label]", + Use: "pod [NAME | -l label]", DisableFlagsInUseLine: true, - Short: i18n.T("Display Resource (CPU/Memory/Storage) usage of pods"), - Long: topPodLong, - Example: topPodExample, + Short: i18n.T("Display Resource (CPU/Memory/Storage) usage of pods"), + Long: topPodLong, + Example: topPodExample, Run: func(cmd *cobra.Command, args []string) { - if err := o.Complete(f, cmd, args); err != nil { - cmdutil.CheckErr(err) - } - if err := o.Validate(); err != nil { - cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, "%v", err)) - } - if err := o.RunTopPod(); err != nil { - cmdutil.CheckErr(err) - } + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunTopPod()) }, Aliases: []string{"pods", "po"}, } cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") cmd.Flags().BoolVar(&o.PrintContainers, "containers", o.PrintContainers, "If present, print usage of containers within a pod.") cmd.Flags().BoolVar(&o.AllNamespaces, "all-namespaces", o.AllNamespaces, "If present, list the requested object(s) across all namespaces. 
Namespace in current context is ignored even if specified with --namespace.") + cmd.Flags().BoolVar(&o.NoHeaders, "no-headers", o.NoHeaders, "If present, print output without headers.") o.HeapsterOptions.Bind(cmd.Flags()) return cmd } @@ -198,7 +194,7 @@ func (o TopPodOptions) RunTopPod() error { return err } - return o.Printer.PrintPodMetrics(metrics.Items, o.PrintContainers, o.AllNamespaces) + return o.Printer.PrintPodMetrics(metrics.Items, o.PrintContainers, o.AllNamespaces, o.NoHeaders) } func getMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, namespace, resourceName string, allNamespaces bool, selector labels.Selector) (*metricsapi.PodMetricsList, error) { @@ -260,10 +256,10 @@ func checkPodAge(pod *v1.Pod) error { age := time.Since(pod.CreationTimestamp.Time) if age > metricsCreationDelay { message := fmt.Sprintf("Metrics not available for pod %s/%s, age: %s", pod.Namespace, pod.Name, age.String()) - glog.Warningf(message) + klog.Warningf(message) return errors.New(message) } else { - glog.V(2).Infof("Metrics not yet available for pod %s/%s, age: %s", pod.Namespace, pod.Name, age.String()) + klog.V(2).Infof("Metrics not yet available for pod %s/%s, age: %s", pod.Namespace, pod.Name, age.String()) return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel index 1af9d364a0e0b..e93e0b0654f7f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel @@ -3,10 +3,9 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "conversion.go", + "crdfinder.go", "factory.go", "factory_client_access.go", - "generator.go", "helpers.go", "kubectl_match_version.go", "printing.go", @@ -16,21 +15,16 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/api/batch/v1beta1:go_default_library", - "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", @@ -42,16 +36,12 @@ go_library( "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", - 
"//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/validation:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/printers:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/printers/internalversion:go_default_library", "//vendor/k8s.io/kubernetes/pkg/version:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/conversion.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/conversion.go deleted file mode 100644 index 98b02ee712774..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/conversion.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/api/legacyscheme" -) - -// AsDefaultVersionedOrOriginal returns the object as a Go object in the external form if possible (matching the -// group version kind of the mapping if provided, a best guess based on serialization if not provided, or obj if it cannot be converted. -// TODO update call sites to specify the scheme they want on their builder. -func AsDefaultVersionedOrOriginal(obj runtime.Object, mapping *meta.RESTMapping) runtime.Object { - converter := runtime.ObjectConvertor(legacyscheme.Scheme) - groupVersioner := runtime.GroupVersioner(schema.GroupVersions(legacyscheme.Scheme.PrioritizedVersionsAllGroups())) - if mapping != nil { - groupVersioner = mapping.GroupVersionKind.GroupVersion() - } - - if obj, err := converter.ConvertToVersion(obj, groupVersioner); err == nil { - return obj - } - return obj -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/crdfinder.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/crdfinder.go new file mode 100644 index 0000000000000..aaf309dca4403 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/crdfinder.go @@ -0,0 +1,109 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" +) + +// CRDGetter is a function that can download the list of GVK for all +// CRDs. +type CRDGetter func() ([]schema.GroupKind, error) + +func CRDFromDynamic(client dynamic.Interface) CRDGetter { + return func() ([]schema.GroupKind, error) { + list, err := client.Resource(schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1beta1", + Resource: "customresourcedefinitions", + }).List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to list CRDs: %v", err) + } + if list == nil { + return nil, nil + } + + gks := []schema.GroupKind{} + + // We need to parse the list to get the gvk, I guess that's fine. + for _, crd := range (*list).Items { + // Look for group, version, and kind + group, _, _ := unstructured.NestedString(crd.Object, "spec", "group") + kind, _, _ := unstructured.NestedString(crd.Object, "spec", "names", "kind") + + gks = append(gks, schema.GroupKind{ + Group: group, + Kind: kind, + }) + } + + return gks, nil + } +} + +// CRDFinder keeps a cache of known CRDs and finds a given GVK in the +// list. +type CRDFinder interface { + HasCRD(gvk schema.GroupKind) (bool, error) +} + +func NewCRDFinder(getter CRDGetter) CRDFinder { + return &crdFinder{ + getter: getter, + } +} + +type crdFinder struct { + getter CRDGetter + cache *[]schema.GroupKind +} + +func (f *crdFinder) cacheCRDs() error { + if f.cache != nil { + return nil + } + + list, err := f.getter() + if err != nil { + return err + } + f.cache = &list + return nil +} + +func (f *crdFinder) findCRD(gvk schema.GroupKind) bool { + for _, crd := range *f.cache { + if reflect.DeepEqual(gvk, crd) { + return true + } + } + return false +} + +func (f *crdFinder) HasCRD(gvk schema.GroupKind) (bool, error) { + if err := f.cacheCRDs(); err != nil { + return false, err + } + return f.findCRD(gvk), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/BUILD.bazel index e7ddfdf46a5da..f99a25776262c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/BUILD.bazel @@ -11,8 +11,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", @@ -26,7 +26,7 @@ go_library( "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", 
"//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editoptions.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editoptions.go index f94f3cd2c5150..2b2add4585941 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editoptions.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editoptions.go @@ -29,9 +29,10 @@ import ( "strings" "github.com/evanphx/json-patch" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -45,7 +46,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf" @@ -287,7 +287,7 @@ func (o *EditOptions) Run() error { if len(results.file) > 0 { os.Remove(results.file) } - glog.V(4).Infof("User edited:\n%s", string(edited)) + klog.V(4).Infof("User edited:\n%s", string(edited)) // Apply validation schema, err := o.f.Validator(o.EnableValidation) @@ -300,7 +300,8 @@ func (o *EditOptions) Run() error { file: file, } containsError = true - fmt.Fprintln(o.ErrOut, results.addError(apierrors.NewInvalid(api.Kind(""), "", field.ErrorList{field.Invalid(nil, "The edited file failed validation", fmt.Sprintf("%v", err))}), infos[0])) + fmt.Fprintln(o.ErrOut, results.addError(apierrors.NewInvalid(corev1.SchemeGroupVersion.WithKind("").GroupKind(), + "", field.ErrorList{field.Invalid(nil, "The edited file failed validation", fmt.Sprintf("%v", err))}), infos[0])) continue } @@ -524,7 +525,7 @@ func GetApplyPatch(obj runtime.Unstructured) ([]byte, []byte, types.PatchType, e if annotations == nil { annotations = map[string]string{} } - annotations[api.LastAppliedConfigAnnotation] = string(beforeJSON) + annotations[corev1.LastAppliedConfigAnnotation] = string(beforeJSON) accessor.SetAnnotations(objCopy, annotations) afterJSON, err := encodeToJson(objCopy.(runtime.Unstructured)) if err != nil { @@ -605,12 +606,12 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor patchType = types.MergePatchType patch, err = jsonpatch.CreateMergePatch(originalJS, editedJS) if err != nil { - glog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) + klog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) return err } for _, precondition := range preconditions { if !precondition(patch) { - glog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) + klog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) return fmt.Errorf("%s", "At least one of apiVersion, kind and name was changed") } } @@ -620,7 +621,7 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor patchType = types.StrategicMergePatchType patch, err = strategicpatch.CreateTwoWayMergePatch(originalJS, 
editedJS, versionedObject, preconditions...) if err != nil { - glog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) + klog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) if mergepatch.IsPreconditionFailed(err) { return fmt.Errorf("%s", "At least one of apiVersion, kind and name was changed") } @@ -668,12 +669,12 @@ func (o *EditOptions) visitAnnotation(annotationVisitor resource.Visitor) error err := annotationVisitor.Visit(func(info *resource.Info, incomingErr error) error { // put configuration annotation in "updates" if o.ApplyAnnotation { - if err := kubectl.CreateOrUpdateAnnotation(true, info.Object, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(true, info.Object, scheme.DefaultJSONEncoder()); err != nil { return err } } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go index a53e8c0970a4a..e7229870ec7a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go @@ -27,7 +27,7 @@ import ( "runtime" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubectl/util/term" ) @@ -124,7 +124,7 @@ func (e Editor) Launch(path string) error { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr cmd.Stdin = os.Stdin - glog.V(5).Infof("Opening file with editor %v", args) + klog.V(5).Infof("Opening file with editor %v", args) if err := (term.TTY{In: os.Stdin, TryDev: true}).Safe(cmd.Run); err != nil { if err, ok := err.(*exec.Error); ok { if err.Err == exec.ErrNotFound { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go index 14368758ef493..b923a4b149e63 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go @@ -21,9 +21,8 @@ package util import ( "sync" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/discovery" @@ -31,8 +30,6 @@ import ( "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/api/legacyscheme" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" openapivalidation "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation" "k8s.io/kubernetes/pkg/kubectl/validation" @@ -118,7 +115,7 @@ func (f *factoryImpl) ClientForMapping(mapping *meta.RESTMapping) (resource.REST } gvk := mapping.GroupVersionKind switch gvk.Group { - case api.GroupName: + case corev1.GroupName: cfg.APIPath = "/api" default: cfg.APIPath = "/apis" @@ -137,7 +134,7 @@ func (f *factoryImpl) UnstructuredClientForMapping(mapping *meta.RESTMapping) (r return nil, err } cfg.APIPath = "/apis" - if mapping.GroupVersionKind.Group == api.GroupName { + if mapping.GroupVersionKind.Group == corev1.GroupName { cfg.APIPath = "/api" } gv := mapping.GroupVersionKind.GroupVersion() @@ -178,13 +175,3 @@ func (f *factoryImpl) 
OpenAPISchema() (openapi.Resources, error) { // Delegate to the OpenAPIGetter return f.openAPIGetter.getter.Get() } - -// this method exists to help us find the points still relying on internal types. -func InternalVersionDecoder() runtime.Decoder { - return legacyscheme.Codecs.UniversalDecoder() -} - -func InternalVersionJSONEncoder() runtime.Encoder { - encoder := legacyscheme.Codecs.LegacyCodec(legacyscheme.Scheme.PrioritizedVersionsAllGroups()...) - return unstructured.JSONFallbackEncoder{Encoder: encoder} -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go index 36bdc6ca31b9c..9a392cde59b00 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go @@ -27,9 +27,9 @@ import ( "time" "github.com/evanphx/json-patch" - "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/klog" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -44,9 +44,6 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/scale" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/printers" - printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" utilexec "k8s.io/utils/exec" ) @@ -91,10 +88,10 @@ func DefaultBehaviorOnFatal() { } // fatal prints the message (if provided) and then exits. If V(2) or greater, -// glog.Fatal is invoked for extended information. +// klog.Fatal is invoked for extended information. func fatal(msg string, code int) { - if glog.V(2) { - glog.FatalDepth(2, msg) + if klog.V(2) { + klog.FatalDepth(2, msg) } if len(msg) > 0 { // add newline if needed @@ -192,13 +189,13 @@ func statusCausesToAggrError(scs []metav1.StatusCause) utilerrors.Aggregate { // StandardErrorMessage translates common errors into a human readable message, or returns // false if the error is not one of the recognized types. It may also log extended -// information to glog. +// information to klog. // // This method is generic to the command in use and may be used by non-Kubectl // commands. 
func StandardErrorMessage(err error) (string, bool) { if debugErr, ok := err.(debugError); ok { - glog.V(4).Infof(debugErr.DebugError()) + klog.V(4).Infof(debugErr.DebugError()) } status, isStatus := err.(kerrors.APIStatus) switch { @@ -216,7 +213,7 @@ func StandardErrorMessage(err error) (string, bool) { } switch t := err.(type) { case *url.Error: - glog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err) + klog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err) switch { case strings.Contains(t.Err.Error(), "connection refused"): host := t.URL @@ -303,7 +300,7 @@ func IsFilenameSliceEmpty(filenames []string) bool { func GetFlagString(cmd *cobra.Command, flag string) string { s, err := cmd.Flags().GetString(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return s } @@ -312,7 +309,7 @@ func GetFlagString(cmd *cobra.Command, flag string) string { func GetFlagStringSlice(cmd *cobra.Command, flag string) []string { s, err := cmd.Flags().GetStringSlice(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return s } @@ -321,7 +318,7 @@ func GetFlagStringSlice(cmd *cobra.Command, flag string) []string { func GetFlagStringArray(cmd *cobra.Command, flag string) []string { s, err := cmd.Flags().GetStringArray(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return s } @@ -329,7 +326,7 @@ func GetFlagStringArray(cmd *cobra.Command, flag string) []string { func GetFlagBool(cmd *cobra.Command, flag string) bool { b, err := cmd.Flags().GetBool(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return b } @@ -338,7 +335,7 @@ func GetFlagBool(cmd *cobra.Command, flag string) bool { func GetFlagInt(cmd *cobra.Command, flag string) int { i, err := cmd.Flags().GetInt(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return i } @@ -347,7 +344,7 @@ func GetFlagInt(cmd *cobra.Command, flag string) int { func GetFlagInt32(cmd *cobra.Command, flag string) int32 { i, err := cmd.Flags().GetInt32(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return i } @@ -356,7 +353,7 @@ func GetFlagInt32(cmd *cobra.Command, flag string) int32 { func GetFlagInt64(cmd *cobra.Command, flag string) int64 { i, err := cmd.Flags().GetInt64(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return i } @@ -364,7 +361,7 @@ func GetFlagInt64(cmd *cobra.Command, flag string) int64 { func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration { d, err := cmd.Flags().GetDuration(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, 
cmd.Name(), err) } return d } @@ -619,53 +616,6 @@ func ShouldIncludeUninitialized(cmd *cobra.Command, includeUninitialized bool) b return shouldIncludeUninitialized } -// DescriberFunc gives a way to display the specified RESTMapping type -type DescriberFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (printers.Describer, error) - -// DescriberFn gives a way to easily override the function for unit testing if needed -var DescriberFn DescriberFunc = describer - -// Returns a Describer for displaying the specified RESTMapping type or an error. -func describer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (printers.Describer, error) { - clientConfig, err := restClientGetter.ToRESTConfig() - if err != nil { - return nil, err - } - // try to get a describer - if describer, ok := printersinternal.DescriberFor(mapping.GroupVersionKind.GroupKind(), clientConfig); ok { - return describer, nil - } - // if this is a kind we don't have a describer for yet, go generic if possible - if genericDescriber, genericErr := genericDescriber(restClientGetter, mapping); genericErr == nil { - return genericDescriber, nil - } - // otherwise return an unregistered error - return nil, fmt.Errorf("no description has been implemented for %s", mapping.GroupVersionKind.String()) -} - -// helper function to make a generic describer, or return an error -func genericDescriber(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (printers.Describer, error) { - clientConfig, err := restClientGetter.ToRESTConfig() - if err != nil { - return nil, err - } - - // used to fetch the resource - dynamicClient, err := dynamic.NewForConfig(clientConfig) - if err != nil { - return nil, err - } - - // used to get events for the resource - clientSet, err := internalclientset.NewForConfig(clientConfig) - if err != nil { - return nil, err - } - - eventsClient := clientSet.Core() - return printersinternal.GenericDescriberFor(mapping, dynamicClient, eventsClient), nil -} - // ScaleClientFunc provides a ScalesGetter type ScaleClientFunc func(genericclioptions.RESTClientGetter) (scale.ScalesGetter, error) @@ -697,3 +647,12 @@ func scaleClient(restClientGetter genericclioptions.RESTClientGetter) (scale.Sca return scale.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil } + +func Warning(cmdErr io.Writer, newGeneratorName, oldGeneratorName string) { + fmt.Fprintf(cmdErr, "WARNING: New generator %q specified, "+ + "but it isn't available. 
"+ + "Falling back to %q.\n", + newGeneratorName, + oldGeneratorName, + ) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/kubectl_match_version.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/kubectl_match_version.go index bf5745c2a6c08..99ba2baeec651 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/kubectl_match_version.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/kubectl_match_version.go @@ -23,12 +23,13 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/discovery" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/version" ) @@ -120,7 +121,10 @@ func setKubernetesDefaults(config *rest.Config) error { config.APIPath = "/api" } if config.NegotiatedSerializer == nil { - config.NegotiatedSerializer = legacyscheme.Codecs + // This codec factory ensures the resources are not converted. Therefore, resources + // will not be round-tripped through internal versions. Defaulting does not happen + // on the client. + config.NegotiatedSerializer = &serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} } return rest.SetKubernetesDefaults(config) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel index f368b4977a458..4772e9242cbf3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "doc.go", + "dryrun.go", "extensions.go", "openapi.go", "openapi_getter.go", @@ -14,6 +15,7 @@ go_library( deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", + "//vendor/gopkg.in/yaml.v2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/doc.go index 56b393a1eb286..7fff1fa87c3a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/doc.go @@ -18,4 +18,4 @@ limitations under the License. // from a Kubernetes server and then indexing the type definitions. // The openapi spec contains the object model definitions and extensions metadata // such as the patchStrategy and patchMergeKey for creating patches. -package openapi +package openapi // k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/dryrun.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/dryrun.go new file mode 100644 index 0000000000000..33cf9e9e5c89b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/dryrun.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "errors" + + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + yaml "gopkg.in/yaml.v2" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func hasGVKExtension(extensions []*openapi_v2.NamedAny, gvk schema.GroupVersionKind) bool { + for _, extension := range extensions { + if extension.GetValue().GetYaml() == "" || + extension.GetName() != "x-kubernetes-group-version-kind" { + continue + } + var value map[string]string + err := yaml.Unmarshal([]byte(extension.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + if value["group"] == gvk.Group && value["kind"] == gvk.Kind && value["version"] == gvk.Version { + return true + } + return false + } + return false +} + +// SupportsDryRun is a method that let's us look in the OpenAPI if the +// specific group-version-kind supports the dryRun query parameter for +// the PATCH end-point. +func SupportsDryRun(doc *openapi_v2.Document, gvk schema.GroupVersionKind) (bool, error) { + for _, path := range doc.GetPaths().GetPath() { + // Is this describing the gvk we're looking for? + if !hasGVKExtension(path.GetValue().GetPatch().GetVendorExtension(), gvk) { + continue + } + for _, param := range path.GetValue().GetPatch().GetParameters() { + if param.GetParameter().GetNonBodyParameter().GetQueryParameterSubSchema().GetName() == "dryRun" { + return true, nil + } + } + return false, nil + } + + return false, errors.New("couldn't find GVK in openapi") +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go index dab4792c1eea5..75b73c6204817 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go @@ -19,7 +19,7 @@ package util import ( "fmt" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) // SuggestApiResources returns a suggestion to use the "api-resources" command diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version/BUILD.bazel new file mode 100644 index 0000000000000..7283797f2e3ca --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["version.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/version", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n:go_default_library", + 
"//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/version:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version/version.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version/version.go index 7e616cbf8d53b..1d249e213d262 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/version/version.go @@ -14,22 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmd +package version import ( "encoding/json" "errors" "fmt" - "github.com/ghodss/yaml" "github.com/spf13/cobra" + "sigs.k8s.io/yaml" apimachineryversion "k8s.io/apimachinery/pkg/version" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/discovery" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + "k8s.io/client-go/tools/clientcmd" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" + "k8s.io/kubernetes/pkg/kubectl/util/templates" "k8s.io/kubernetes/pkg/version" ) @@ -83,7 +84,9 @@ func NewCmdVersion(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *co func (o *VersionOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { var err error o.discoveryClient, err = f.ToDiscoveryClient() - if err != nil { + // if we had an empty rest.Config, continue and just print out client information. + // if we had an error other than being unable to build a rest.Config, fail. + if err != nil && !clientcmd.IsEmptyConfig(err) { return err } return nil @@ -107,7 +110,7 @@ func (o *VersionOptions) Run() error { clientVersion := version.Get() versionInfo.ClientVersion = &clientVersion - if !o.ClientOnly { + if !o.ClientOnly && o.discoveryClient != nil { // Always request fresh data from the server o.discoveryClient.Invalidate() serverVersion, serverErr = o.discoveryClient.ServerVersion() diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/wait/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/wait/BUILD.bazel index 7ed2eb2620562..76e2733c2bdb0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/wait/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/wait/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", @@ -21,7 +22,7 @@ go_library( "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/tools/watch:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/wait/wait.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/wait/wait.go index 
7aec8c1d75722..3868bae5a2695 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/wait/wait.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/wait/wait.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "io" "strings" "time" @@ -28,6 +29,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -38,8 +40,8 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/dynamic" watchtools "k8s.io/client-go/tools/watch" - "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( @@ -102,11 +104,11 @@ func NewCmdWait(restClientGetter genericclioptions.RESTClientGetter, streams gen flags := NewWaitFlags(restClientGetter, streams) cmd := &cobra.Command{ - Use: "wait resource.group/name [--for=delete|--for condition=available]", + Use: "wait resource.group/name [--for=delete|--for condition=available]", DisableFlagsInUseLine: true, - Short: "Experimental: Wait for a specific condition on one or many resources.", - Long: wait_long, - Example: wait_example, + Short: "Experimental: Wait for a specific condition on one or many resources.", + Long: wait_long, + Example: wait_example, Run: func(cmd *cobra.Command, args []string) { o, err := flags.ToOptions(args) cmdutil.CheckErr(err) @@ -145,7 +147,7 @@ func (flags *WaitFlags) ToOptions(args []string) (*WaitOptions, error) { if err != nil { return nil, err } - conditionFn, err := conditionFuncFor(flags.ForCondition) + conditionFn, err := conditionFuncFor(flags.ForCondition, flags.ErrOut) if err != nil { return nil, err } @@ -168,16 +170,22 @@ func (flags *WaitFlags) ToOptions(args []string) (*WaitOptions, error) { return o, nil } -func conditionFuncFor(condition string) (ConditionFunc, error) { +func conditionFuncFor(condition string, errOut io.Writer) (ConditionFunc, error) { if strings.ToLower(condition) == "delete" { return IsDeleted, nil } if strings.HasPrefix(condition, "condition=") { conditionName := condition[len("condition="):] + conditionValue := "true" + if equalsIndex := strings.Index(conditionName, "="); equalsIndex != -1 { + conditionValue = conditionName[equalsIndex+1:] + conditionName = conditionName[0:equalsIndex] + } + return ConditionalWait{ - conditionName: conditionName, - // TODO allow specifying a false - conditionStatus: "true", + conditionName: conditionName, + conditionStatus: conditionValue, + errOut: errOut, }.IsConditionMet, nil } @@ -242,7 +250,14 @@ func (o *WaitOptions) RunWait() error { func IsDeleted(info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { endTime := time.Now().Add(o.Timeout) for { - gottenObj, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Get(info.Name, metav1.GetOptions{}) + if len(info.Name) == 0 { + return info.Object, false, fmt.Errorf("resource name must be provided") + } + + nameSelector := fields.OneTermEqualSelector("metadata.name", info.Name).String() + + // List with a name field selector to get the current resourceVersion to watch from (not the object's resourceVersion) + gottenObjList, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(metav1.ListOptions{FieldSelector: nameSelector}) if apierrors.IsNotFound(err) { return info.Object, true, nil } @@ 
-250,6 +265,10 @@ func IsDeleted(info *resource.Info, o *WaitOptions) (runtime.Object, bool, error // TODO this could do something slightly fancier if we wish return info.Object, false, err } + if len(gottenObjList.Items) != 1 { + return info.Object, true, nil + } + gottenObj := &gottenObjList.Items[0] resourceLocation := ResourceLocation{ GroupResource: info.Mapping.Resource.GroupResource(), Namespace: gottenObj.GetNamespace(), @@ -262,8 +281,8 @@ func IsDeleted(info *resource.Info, o *WaitOptions) (runtime.Object, bool, error } watchOptions := metav1.ListOptions{} - watchOptions.FieldSelector = "metadata.name=" + info.Name - watchOptions.ResourceVersion = gottenObj.GetResourceVersion() + watchOptions.FieldSelector = nameSelector + watchOptions.ResourceVersion = gottenObjList.GetResourceVersion() objWatch, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(watchOptions) if err != nil { return gottenObj, false, err @@ -276,7 +295,7 @@ func IsDeleted(info *resource.Info, o *WaitOptions) (runtime.Object, bool, error } ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), o.Timeout) - watchEvent, err := watchtools.UntilWithoutRetry(ctx, objWatch, isDeleted) + watchEvent, err := watchtools.UntilWithoutRetry(ctx, objWatch, Wait{errOut: o.ErrOut}.IsDeleted) cancel() switch { case err == nil: @@ -294,28 +313,57 @@ func IsDeleted(info *resource.Info, o *WaitOptions) (runtime.Object, bool, error } } -func isDeleted(event watch.Event) (bool, error) { - return event.Type == watch.Deleted, nil +// Wait has helper methods for handling watches, including error handling. +type Wait struct { + errOut io.Writer +} + +// IsDeleted returns true if the object is deleted. It prints any errors it encounters. +func (w Wait) IsDeleted(event watch.Event) (bool, error) { + switch event.Type { + case watch.Error: + // keep waiting in the event we see an error - we expect the watch to be closed by + // the server if the error is unrecoverable. 
+ err := apierrors.FromObject(event.Object) + fmt.Fprintf(w.errOut, "error: An error occurred while waiting for the object to be deleted: %v", err) + return false, nil + case watch.Deleted: + return true, nil + default: + return false, nil + } } // ConditionalWait hold information to check an API status condition type ConditionalWait struct { conditionName string conditionStatus string + // errOut is written to if an error occurs + errOut io.Writer } // IsConditionMet is a conditionfunc for waiting on an API condition to be met func (w ConditionalWait) IsConditionMet(info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { endTime := time.Now().Add(o.Timeout) for { + if len(info.Name) == 0 { + return info.Object, false, fmt.Errorf("resource name must be provided") + } + + nameSelector := fields.OneTermEqualSelector("metadata.name", info.Name).String() + + var gottenObj *unstructured.Unstructured + // List with a name field selector to get the current resourceVersion to watch from (not the object's resourceVersion) + gottenObjList, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(metav1.ListOptions{FieldSelector: nameSelector}) + resourceVersion := "" - gottenObj, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Get(info.Name, metav1.GetOptions{}) switch { - case apierrors.IsNotFound(err): - resourceVersion = "0" case err != nil: return info.Object, false, err + case len(gottenObjList.Items) != 1: + resourceVersion = gottenObjList.GetResourceVersion() default: + gottenObj = &gottenObjList.Items[0] conditionMet, err := w.checkCondition(gottenObj) if conditionMet { return gottenObj, true, nil @@ -323,11 +371,11 @@ func (w ConditionalWait) IsConditionMet(info *resource.Info, o *WaitOptions) (ru if err != nil { return gottenObj, false, err } - resourceVersion = gottenObj.GetResourceVersion() + resourceVersion = gottenObjList.GetResourceVersion() } watchOptions := metav1.ListOptions{} - watchOptions.FieldSelector = "metadata.name=" + info.Name + watchOptions.FieldSelector = nameSelector watchOptions.ResourceVersion = resourceVersion objWatch, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(watchOptions) if err != nil { @@ -384,6 +432,13 @@ func (w ConditionalWait) checkCondition(obj *unstructured.Unstructured) (bool, e } func (w ConditionalWait) isConditionMet(event watch.Event) (bool, error) { + if event.Type == watch.Error { + // keep waiting in the event we see an error - we expect the watch to be closed by + // the server + err := apierrors.FromObject(event.Object) + fmt.Fprintf(w.errOut, "error: An error occurred while waiting for the condition to be satisfied: %v", err) + return false, nil + } if event.Type == watch.Deleted { // this will chain back out, result in another get and an return false back up the chain return false, nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go b/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go index 36a752c9ad2d6..405bc6d4634b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go @@ -26,9 +26,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/pkg/api/pod" - podv1 "k8s.io/kubernetes/pkg/api/v1/pod" - api "k8s.io/kubernetes/pkg/apis/core" ) // ControllerHasDesiredReplicas returns a condition that will be true if and only if @@ -56,36 +53,6 @@ func 
ControllerHasDesiredReplicas(rcClient corev1client.ReplicationControllersGe // the pod has already reached completed state. var ErrPodCompleted = fmt.Errorf("pod ran to completion") -// ErrContainerTerminated is returned by PodContainerRunning in the intermediate -// state where the pod indicates it's still running, but its container is already terminated -var ErrContainerTerminated = fmt.Errorf("container terminated") - -// PodRunning returns true if the pod is running, false if the pod has not yet reached running state, -// returns ErrPodCompleted if the pod has run to completion, or an error in any other case. -func PodRunning(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodRunning: - return true, nil - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - } - case *corev1.Pod: - switch t.Status.Phase { - case corev1.PodRunning: - return true, nil - case corev1.PodFailed, corev1.PodSucceeded: - return false, ErrPodCompleted - } - } - return false, nil -} - // PodCompleted returns true if the pod has run to completion, false if the pod has not yet // reached running state, or an error in any other case. func PodCompleted(event watch.Event) (bool, error) { @@ -94,11 +61,6 @@ func PodCompleted(event watch.Event) (bool, error) { return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") } switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodFailed, api.PodSucceeded: - return true, nil - } case *corev1.Pod: switch t.Status.Phase { case corev1.PodFailed, corev1.PodSucceeded: @@ -117,38 +79,21 @@ func PodRunningAndReady(event watch.Event) (bool, error) { return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") } switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - case api.PodRunning: - return pod.IsPodReady(t), nil - } case *corev1.Pod: switch t.Status.Phase { case corev1.PodFailed, corev1.PodSucceeded: return false, ErrPodCompleted case corev1.PodRunning: - return podv1.IsPodReady(t), nil - } - } - return false, nil -} - -// PodNotPending returns true if the pod has left the pending state, false if it has not, -// or an error in any other case (such as if the pod was deleted). 
-func PodNotPending(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodPending: - return false, nil - default: - return true, nil + conditions := t.Status.Conditions + if conditions == nil { + return false, nil + } + for i := range conditions { + if conditions[i].Type == corev1.PodReady && + conditions[i].Status == corev1.ConditionTrue { + return true, nil + } + } } } return false, nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/BUILD.bazel new file mode 100644 index 0000000000000..eb1fefeef0dc0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["interface.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/describe", + importpath = "k8s.io/kubernetes/pkg/kubectl/describe", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/interface.go b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/interface.go new file mode 100644 index 0000000000000..58429918787e9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/interface.go @@ -0,0 +1,71 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package describe + +import ( + "fmt" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +const ( + // LoadBalancerWidth is the width how we describe load balancer + LoadBalancerWidth = 16 + + // LabelNodeRolePrefix is a label prefix for node roles + // It's copied over to here until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 + LabelNodeRolePrefix = "node-role.kubernetes.io/" + + // NodeLabelRole specifies the role of a node + NodeLabelRole = "kubernetes.io/role" +) + +// DescriberFunc gives a way to display the specified RESTMapping type +type DescriberFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (Describer, error) + +// Describer generates output for the named resource or an error +// if the output could not be generated. Implementers typically +// abstract the retrieval of the named object from a remote server. +type Describer interface { + Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) +} + +// DescriberSettings holds display configuration for each object +// describer to control what is printed. +type DescriberSettings struct { + ShowEvents bool +} + +// ObjectDescriber is an interface for displaying arbitrary objects with extra +// information. 
Use when an object is in hand (on disk, or already retrieved). +// Implementers may ignore the additional information passed on extra, or use it +// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer +// is found. +type ObjectDescriber interface { + DescribeObject(object interface{}, extra ...interface{}) (output string, err error) +} + +// ErrNoDescriber is a structured error indicating the provided object or objects +// cannot be described. +type ErrNoDescriber struct { + Types []string +} + +// Error implements the error interface. +func (e ErrNoDescriber) Error() string { + return fmt.Sprintf("no describer has been defined for %v", e.Types) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/BUILD.bazel new file mode 100644 index 0000000000000..fe1c8b4fb1f1e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/BUILD.bazel @@ -0,0 +1,53 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["describe.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned", + importpath = "k8s.io/kubernetes/pkg/kubectl/describe/versioned", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/fatih/camelcase:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/autoscaling/v2beta2:go_default_library", + "//vendor/k8s.io/api/batch/v1:go_default_library", + "//vendor/k8s.io/api/batch/v1beta1:go_default_library", + "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/api/networking/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", + "//vendor/k8s.io/api/scheduling/v1beta1:go_default_library", + "//vendor/k8s.io/api/storage/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/duration:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/reference:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/describe:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate:go_default_library", + 
"//vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/event:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/qos:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/resource:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/slice:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/storage:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/describe.go similarity index 78% rename from vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/describe.go index 1fe2b0ffd1a76..6f6dba6f0bcbc 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/describe.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package internalversion +package versioned import ( "bytes" @@ -30,12 +30,20 @@ import ( "text/tabwriter" "time" - "github.com/golang/glog" - "github.com/fatih/camelcase" appsv1 "k8s.io/api/apps/v1" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" @@ -47,35 +55,24 @@ import ( "k8s.io/apimachinery/pkg/util/duration" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/dynamic" - externalclient "k8s.io/client-go/kubernetes" + clientset "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api/events" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/api/ref" - resourcehelper "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/certificates" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/apis/core/helper/qos" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/networking" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/apis/rbac" - rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" - "k8s.io/kubernetes/pkg/apis/scheduling" - "k8s.io/kubernetes/pkg/apis/storage" - storageutil "k8s.io/kubernetes/pkg/apis/storage/util" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/fieldpath" - 
"k8s.io/kubernetes/pkg/printers" - "k8s.io/kubernetes/pkg/registry/rbac/validation" - "k8s.io/kubernetes/pkg/util/slice" + "k8s.io/client-go/tools/reference" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/kubectl/describe" + "k8s.io/kubernetes/pkg/kubectl/scheme" + "k8s.io/kubernetes/pkg/kubectl/util/certificate" + deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" + "k8s.io/kubernetes/pkg/kubectl/util/event" + "k8s.io/kubernetes/pkg/kubectl/util/fieldpath" + "k8s.io/kubernetes/pkg/kubectl/util/qos" + "k8s.io/kubernetes/pkg/kubectl/util/rbac" + resourcehelper "k8s.io/kubernetes/pkg/kubectl/util/resource" + "k8s.io/kubernetes/pkg/kubectl/util/slice" + storageutil "k8s.io/kubernetes/pkg/kubectl/util/storage" ) // Each level has 2 spaces for PrefixWriter @@ -86,6 +83,27 @@ const ( LEVEL_3 ) +// DescriberFn gives a way to easily override the function for unit testing if needed +var DescriberFn describe.DescriberFunc = Describer + +// Describer returns a Describer for displaying the specified RESTMapping type or an error. +func Describer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (describe.Describer, error) { + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + // try to get a describer + if describer, ok := DescriberFor(mapping.GroupVersionKind.GroupKind(), clientConfig); ok { + return describer, nil + } + // if this is a kind we don't have a describer for yet, go generic if possible + if genericDescriber, ok := GenericDescriberFor(mapping, clientConfig); ok { + return genericDescriber, nil + } + // otherwise return an unregistered error + return nil, fmt.Errorf("no description has been implemented for %s", mapping.GroupVersionKind.String()) +} + // PrefixWriter can write text at various indentation levels. type PrefixWriter interface { // Write writes text with the specified indentation level. 
@@ -127,54 +145,49 @@ func (pw *prefixWriter) Flush() { } } -func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]printers.Describer, error) { +func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]describe.Describer, error) { c, err := clientset.NewForConfig(clientConfig) if err != nil { return nil, err } - externalclient, err := externalclient.NewForConfig(clientConfig) - if err != nil { - return nil, err - } - m := map[schema.GroupKind]printers.Describer{ - api.Kind("Pod"): &PodDescriber{c}, - api.Kind("ReplicationController"): &ReplicationControllerDescriber{c}, - api.Kind("Secret"): &SecretDescriber{c}, - api.Kind("Service"): &ServiceDescriber{c}, - api.Kind("ServiceAccount"): &ServiceAccountDescriber{c}, - api.Kind("Node"): &NodeDescriber{c}, - api.Kind("LimitRange"): &LimitRangeDescriber{c}, - api.Kind("ResourceQuota"): &ResourceQuotaDescriber{c}, - api.Kind("PersistentVolume"): &PersistentVolumeDescriber{c}, - api.Kind("PersistentVolumeClaim"): &PersistentVolumeClaimDescriber{c}, - api.Kind("Namespace"): &NamespaceDescriber{c}, - api.Kind("Endpoints"): &EndpointsDescriber{c}, - api.Kind("ConfigMap"): &ConfigMapDescriber{c}, - api.Kind("PriorityClass"): &PriorityClassDescriber{c}, - - extensions.Kind("ReplicaSet"): &ReplicaSetDescriber{c}, - extensions.Kind("NetworkPolicy"): &NetworkPolicyDescriber{c}, - extensions.Kind("PodSecurityPolicy"): &PodSecurityPolicyDescriber{c}, - autoscaling.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c}, - extensions.Kind("DaemonSet"): &DaemonSetDescriber{c}, - extensions.Kind("Deployment"): &DeploymentDescriber{c, externalclient}, - extensions.Kind("Ingress"): &IngressDescriber{c}, - batch.Kind("Job"): &JobDescriber{c}, - batch.Kind("CronJob"): &CronJobDescriber{c, externalclient}, - apps.Kind("StatefulSet"): &StatefulSetDescriber{c}, - apps.Kind("Deployment"): &DeploymentDescriber{c, externalclient}, - apps.Kind("DaemonSet"): &DaemonSetDescriber{c}, - apps.Kind("ReplicaSet"): &ReplicaSetDescriber{c}, - certificates.Kind("CertificateSigningRequest"): &CertificateSigningRequestDescriber{c}, - storage.Kind("StorageClass"): &StorageClassDescriber{c}, - policy.Kind("PodDisruptionBudget"): &PodDisruptionBudgetDescriber{c}, - rbac.Kind("Role"): &RoleDescriber{externalclient}, - rbac.Kind("ClusterRole"): &ClusterRoleDescriber{externalclient}, - rbac.Kind("RoleBinding"): &RoleBindingDescriber{externalclient}, - rbac.Kind("ClusterRoleBinding"): &ClusterRoleBindingDescriber{externalclient}, - networking.Kind("NetworkPolicy"): &NetworkPolicyDescriber{c}, - scheduling.Kind("PriorityClass"): &PriorityClassDescriber{c}, + m := map[schema.GroupKind]describe.Describer{ + {Group: corev1.GroupName, Kind: "Pod"}: &PodDescriber{c}, + {Group: corev1.GroupName, Kind: "ReplicationController"}: &ReplicationControllerDescriber{c}, + {Group: corev1.GroupName, Kind: "Secret"}: &SecretDescriber{c}, + {Group: corev1.GroupName, Kind: "Service"}: &ServiceDescriber{c}, + {Group: corev1.GroupName, Kind: "ServiceAccount"}: &ServiceAccountDescriber{c}, + {Group: corev1.GroupName, Kind: "Node"}: &NodeDescriber{c}, + {Group: corev1.GroupName, Kind: "LimitRange"}: &LimitRangeDescriber{c}, + {Group: corev1.GroupName, Kind: "ResourceQuota"}: &ResourceQuotaDescriber{c}, + {Group: corev1.GroupName, Kind: "PersistentVolume"}: &PersistentVolumeDescriber{c}, + {Group: corev1.GroupName, Kind: "PersistentVolumeClaim"}: &PersistentVolumeClaimDescriber{c}, + {Group: corev1.GroupName, Kind: "Namespace"}: &NamespaceDescriber{c}, + {Group: 
corev1.GroupName, Kind: "Endpoints"}: &EndpointsDescriber{c}, + {Group: corev1.GroupName, Kind: "ConfigMap"}: &ConfigMapDescriber{c}, + {Group: corev1.GroupName, Kind: "PriorityClass"}: &PriorityClassDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "ReplicaSet"}: &ReplicaSetDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "NetworkPolicy"}: &NetworkPolicyDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "PodSecurityPolicy"}: &PodSecurityPolicyDescriber{c}, + {Group: autoscalingv2beta2.GroupName, Kind: "HorizontalPodAutoscaler"}: &HorizontalPodAutoscalerDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "DaemonSet"}: &DaemonSetDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "Deployment"}: &DeploymentDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, + {Group: batchv1.GroupName, Kind: "Job"}: &JobDescriber{c}, + {Group: batchv1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, + {Group: appsv1.GroupName, Kind: "StatefulSet"}: &StatefulSetDescriber{c}, + {Group: appsv1.GroupName, Kind: "Deployment"}: &DeploymentDescriber{c}, + {Group: appsv1.GroupName, Kind: "DaemonSet"}: &DaemonSetDescriber{c}, + {Group: appsv1.GroupName, Kind: "ReplicaSet"}: &ReplicaSetDescriber{c}, + {Group: certificatesv1beta1.GroupName, Kind: "CertificateSigningRequest"}: &CertificateSigningRequestDescriber{c}, + {Group: storagev1.GroupName, Kind: "StorageClass"}: &StorageClassDescriber{c}, + {Group: policyv1beta1.GroupName, Kind: "PodDisruptionBudget"}: &PodDisruptionBudgetDescriber{c}, + {Group: rbacv1.GroupName, Kind: "Role"}: &RoleDescriber{c}, + {Group: rbacv1.GroupName, Kind: "ClusterRole"}: &ClusterRoleDescriber{c}, + {Group: rbacv1.GroupName, Kind: "RoleBinding"}: &RoleBindingDescriber{c}, + {Group: rbacv1.GroupName, Kind: "ClusterRoleBinding"}: &ClusterRoleBindingDescriber{c}, + {Group: networkingv1.GroupName, Kind: "NetworkPolicy"}: &NetworkPolicyDescriber{c}, + {Group: schedulingv1beta1.GroupName, Kind: "PriorityClass"}: &PriorityClassDescriber{c}, } return m, nil @@ -182,10 +195,10 @@ func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]printers.Desc // DescriberFor returns the default describe functions for each of the standard // Kubernetes types. 
-func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (printers.Describer, bool) { +func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (describe.Describer, bool) { describers, err := describerMap(clientConfig) if err != nil { - glog.V(1).Info(err) + klog.V(1).Info(err) return nil, false } @@ -195,25 +208,38 @@ func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (printers.De // GenericDescriberFor returns a generic describer for the specified mapping // that uses only information available from runtime.Unstructured -func GenericDescriberFor(mapping *meta.RESTMapping, dynamic dynamic.Interface, events coreclient.EventsGetter) printers.Describer { - return &genericDescriber{mapping, dynamic, events} +func GenericDescriberFor(mapping *meta.RESTMapping, clientConfig *rest.Config) (describe.Describer, bool) { + // used to fetch the resource + dynamicClient, err := dynamic.NewForConfig(clientConfig) + if err != nil { + return nil, false + } + + // used to get events for the resource + clientSet, err := clientset.NewForConfig(clientConfig) + if err != nil { + return nil, false + } + eventsClient := clientSet.Core() + + return &genericDescriber{mapping, dynamicClient, eventsClient}, true } type genericDescriber struct { mapping *meta.RESTMapping dynamic dynamic.Interface - events coreclient.EventsGetter + events corev1client.EventsGetter } -func (g *genericDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (output string, err error) { +func (g *genericDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (output string, err error) { obj, err := g.dynamic.Resource(g.mapping.Resource).Namespace(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = g.events.Events(namespace).Search(legacyscheme.Scheme, obj) + events, _ = g.events.Events(namespace).Search(scheme.Scheme, obj) } return tabbedString(func(out io.Writer) error { @@ -295,7 +321,7 @@ func smartLabelFor(field string) string { } // DefaultObjectDescriber can describe the default Kubernetes objects. 
-var DefaultObjectDescriber printers.ObjectDescriber +var DefaultObjectDescriber describe.ObjectDescriber func init() { d := &Describers{} @@ -310,7 +336,7 @@ func init() { describeNamespace, ) if err != nil { - glog.Fatalf("Cannot register describers: %v", err) + klog.Fatalf("Cannot register describers: %v", err) } DefaultObjectDescriber = d } @@ -320,7 +346,7 @@ type NamespaceDescriber struct { clientset.Interface } -func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { ns, err := d.Core().Namespaces().Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -348,7 +374,7 @@ func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings return describeNamespace(ns, resourceQuotaList, limitRangeList) } -func describeNamespace(namespace *api.Namespace, resourceQuotaList *api.ResourceQuotaList, limitRangeList *api.LimitRangeList) (string, error) { +func describeNamespace(namespace *corev1.Namespace, resourceQuotaList *corev1.ResourceQuotaList, limitRangeList *corev1.LimitRangeList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", namespace.Name) @@ -367,7 +393,7 @@ func describeNamespace(namespace *api.Namespace, resourceQuotaList *api.Resource }) } -func describeLimitRangeSpec(spec api.LimitRangeSpec, prefix string, w PrefixWriter) { +func describeLimitRangeSpec(spec corev1.LimitRangeSpec, prefix string, w PrefixWriter) { for i := range spec.Limits { item := spec.Limits[i] maxResources := item.Max @@ -376,7 +402,7 @@ func describeLimitRangeSpec(spec api.LimitRangeSpec, prefix string, w PrefixWrit defaultRequestResources := item.DefaultRequest ratio := item.MaxLimitRequestRatio - set := map[api.ResourceName]bool{} + set := map[corev1.ResourceName]bool{} for k := range maxResources { set[k] = true } @@ -433,7 +459,7 @@ func describeLimitRangeSpec(spec api.LimitRangeSpec, prefix string, w PrefixWrit } // DescribeLimitRanges merges a set of limit range items into a single tabular description -func DescribeLimitRanges(limitRanges *api.LimitRangeList, w PrefixWriter) { +func DescribeLimitRanges(limitRanges *corev1.LimitRangeList, w PrefixWriter) { if len(limitRanges.Items) == 0 { w.Write(LEVEL_0, "No resource limits.\n") return @@ -446,7 +472,7 @@ func DescribeLimitRanges(limitRanges *api.LimitRangeList, w PrefixWriter) { } // DescribeResourceQuotas merges a set of quota items into a single tabular description of all quotas -func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w PrefixWriter) { +func DescribeResourceQuotas(quotas *corev1.ResourceQuotaList, w PrefixWriter) { if len(quotas.Items) == 0 { w.Write(LEVEL_0, "No resource quota.\n") return @@ -464,7 +490,7 @@ func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w PrefixWriter) { sort.Strings(scopes) w.Write(LEVEL_0, " Scopes:\t%s\n", strings.Join(scopes, ", ")) for _, scope := range scopes { - helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope)) + helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope)) if len(helpText) > 0 { w.Write(LEVEL_0, " * %s\n", helpText) } @@ -474,7 +500,7 @@ func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w PrefixWriter) { w.Write(LEVEL_0, " Resource\tUsed\tHard\n") w.Write(LEVEL_0, " --------\t---\t---\n") - resources := make([]api.ResourceName, 0, 
len(q.Status.Hard)) + resources := make([]corev1.ResourceName, 0, len(q.Status.Hard)) for resource := range q.Status.Hard { resources = append(resources, resource) } @@ -493,7 +519,7 @@ type LimitRangeDescriber struct { clientset.Interface } -func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { lr := d.Core().LimitRanges(namespace) limitRange, err := lr.Get(name, metav1.GetOptions{}) @@ -503,7 +529,7 @@ func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings return describeLimitRange(limitRange) } -func describeLimitRange(limitRange *api.LimitRange) (string, error) { +func describeLimitRange(limitRange *corev1.LimitRange) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", limitRange.Name) @@ -520,7 +546,7 @@ type ResourceQuotaDescriber struct { clientset.Interface } -func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { rq := d.Core().ResourceQuotas(namespace) resourceQuota, err := rq.Get(name, metav1.GetOptions{}) @@ -531,21 +557,21 @@ func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSetti return describeQuota(resourceQuota) } -func helpTextForResourceQuotaScope(scope api.ResourceQuotaScope) string { +func helpTextForResourceQuotaScope(scope corev1.ResourceQuotaScope) string { switch scope { - case api.ResourceQuotaScopeTerminating: + case corev1.ResourceQuotaScopeTerminating: return "Matches all pods that have an active deadline. These pods have a limited lifespan on a node before being actively terminated by the system." - case api.ResourceQuotaScopeNotTerminating: + case corev1.ResourceQuotaScopeNotTerminating: return "Matches all pods that do not have an active deadline. These pods usually include long running pods whose container command is not expected to terminate." - case api.ResourceQuotaScopeBestEffort: + case corev1.ResourceQuotaScopeBestEffort: return "Matches all pods that do not have resource requirements set. These pods have a best effort quality of service." - case api.ResourceQuotaScopeNotBestEffort: + case corev1.ResourceQuotaScopeNotBestEffort: return "Matches all pods that have at least one resource requirement set. These pods have a burstable or guaranteed quality of service." 
default: return "" } } -func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { +func describeQuota(resourceQuota *corev1.ResourceQuota) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", resourceQuota.Name) @@ -558,7 +584,7 @@ func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { sort.Strings(scopes) w.Write(LEVEL_0, "Scopes:\t%s\n", strings.Join(scopes, ", ")) for _, scope := range scopes { - helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope)) + helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope)) if len(helpText) > 0 { w.Write(LEVEL_0, " * %s\n", helpText) } @@ -567,7 +593,7 @@ func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { w.Write(LEVEL_0, "Resource\tUsed\tHard\n") w.Write(LEVEL_0, "--------\t----\t----\n") - resources := make([]api.ResourceName, 0, len(resourceQuota.Status.Hard)) + resources := make([]corev1.ResourceName, 0, len(resourceQuota.Status.Hard)) for resource := range resourceQuota.Status.Hard { resources = append(resources, resource) } @@ -590,7 +616,7 @@ type PodDescriber struct { clientset.Interface } -func (d *PodDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *PodDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { pod, err := d.Core().Pods(namespace).Get(name, metav1.GetOptions{}) if err != nil { if describerSettings.ShowEvents { @@ -610,20 +636,20 @@ func (d *PodDescriber) Describe(namespace, name string, describerSettings printe return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - if ref, err := ref.GetReference(legacyscheme.Scheme, pod); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", pod, err) + if ref, err := reference.GetReference(scheme.Scheme, pod); err != nil { + klog.Errorf("Unable to construct reference to '%#v': %v", pod, err) } else { ref.Kind = "" - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, ref) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, ref) } } return describePod(pod, events) } -func describePod(pod *api.Pod, events *api.EventList) (string, error) { +func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", pod.Name) @@ -666,6 +692,21 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) { describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), w, "") } describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), w, "") + if len(pod.Spec.ReadinessGates) > 0 { + w.Write(LEVEL_0, "Readiness Gates:\n Type\tStatus\n") + for _, g := range pod.Spec.ReadinessGates { + status := "" + for _, c := range pod.Status.Conditions { + if c.Type == g.ConditionType { + status = fmt.Sprintf("%v", c.Status) + break + } + } + w.Write(LEVEL_1, "%v \t%v \n", + g.ConditionType, + status) + } + } if len(pod.Status.Conditions) > 0 { w.Write(LEVEL_0, "Conditions:\n Type\tStatus\n") for _, c := range pod.Status.Conditions { @@ -696,7 +737,7 @@ func printController(controllee metav1.Object) string { return "" } -func describeVolumes(volumes []api.Volume, w PrefixWriter, space string) { +func 
describeVolumes(volumes []corev1.Volume, w PrefixWriter, space string) { if volumes == nil || len(volumes) == 0 { w.Write(LEVEL_0, "%sVolumes:\t\n", space) return @@ -761,13 +802,15 @@ func describeVolumes(volumes []api.Volume, w PrefixWriter, space string) { printFlexVolumeSource(volume.VolumeSource.FlexVolume, w) case volume.VolumeSource.Flocker != nil: printFlockerVolumeSource(volume.VolumeSource.Flocker, w) + case volume.VolumeSource.Projected != nil: + printProjectedVolumeSource(volume.VolumeSource.Projected, w) default: w.Write(LEVEL_1, "\n") } } } -func printHostPathVolumeSource(hostPath *api.HostPathVolumeSource, w PrefixWriter) { +func printHostPathVolumeSource(hostPath *corev1.HostPathVolumeSource, w PrefixWriter) { hostPathType := "" if hostPath.Type != nil { hostPathType = string(*hostPath.Type) @@ -778,12 +821,12 @@ func printHostPathVolumeSource(hostPath *api.HostPathVolumeSource, w PrefixWrite hostPath.Path, hostPathType) } -func printEmptyDirVolumeSource(emptyDir *api.EmptyDirVolumeSource, w PrefixWriter) { +func printEmptyDirVolumeSource(emptyDir *corev1.EmptyDirVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tEmptyDir (a temporary directory that shares a pod's lifetime)\n"+ " Medium:\t%v\n", emptyDir.Medium) } -func printGCEPersistentDiskVolumeSource(gce *api.GCEPersistentDiskVolumeSource, w PrefixWriter) { +func printGCEPersistentDiskVolumeSource(gce *corev1.GCEPersistentDiskVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tGCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)\n"+ " PDName:\t%v\n"+ " FSType:\t%v\n"+ @@ -792,7 +835,7 @@ func printGCEPersistentDiskVolumeSource(gce *api.GCEPersistentDiskVolumeSource, gce.PDName, gce.FSType, gce.Partition, gce.ReadOnly) } -func printAWSElasticBlockStoreVolumeSource(aws *api.AWSElasticBlockStoreVolumeSource, w PrefixWriter) { +func printAWSElasticBlockStoreVolumeSource(aws *corev1.AWSElasticBlockStoreVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tAWSElasticBlockStore (a Persistent Disk resource in AWS)\n"+ " VolumeID:\t%v\n"+ " FSType:\t%v\n"+ @@ -801,14 +844,14 @@ func printAWSElasticBlockStoreVolumeSource(aws *api.AWSElasticBlockStoreVolumeSo aws.VolumeID, aws.FSType, aws.Partition, aws.ReadOnly) } -func printGitRepoVolumeSource(git *api.GitRepoVolumeSource, w PrefixWriter) { +func printGitRepoVolumeSource(git *corev1.GitRepoVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tGitRepo (a volume that is pulled from git when the pod is created)\n"+ " Repository:\t%v\n"+ " Revision:\t%v\n", git.Repository, git.Revision) } -func printSecretVolumeSource(secret *api.SecretVolumeSource, w PrefixWriter) { +func printSecretVolumeSource(secret *corev1.SecretVolumeSource, w PrefixWriter) { optional := secret.Optional != nil && *secret.Optional w.Write(LEVEL_2, "Type:\tSecret (a volume populated by a Secret)\n"+ " SecretName:\t%v\n"+ @@ -816,7 +859,7 @@ func printSecretVolumeSource(secret *api.SecretVolumeSource, w PrefixWriter) { secret.SecretName, optional) } -func printConfigMapVolumeSource(configMap *api.ConfigMapVolumeSource, w PrefixWriter) { +func printConfigMapVolumeSource(configMap *corev1.ConfigMapVolumeSource, w PrefixWriter) { optional := configMap.Optional != nil && *configMap.Optional w.Write(LEVEL_2, "Type:\tConfigMap (a volume populated by a ConfigMap)\n"+ " Name:\t%v\n"+ @@ -824,7 +867,27 @@ func printConfigMapVolumeSource(configMap *api.ConfigMapVolumeSource, w PrefixWr configMap.Name, optional) } -func printNFSVolumeSource(nfs *api.NFSVolumeSource, w PrefixWriter) { 
+func printProjectedVolumeSource(projected *corev1.ProjectedVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tProjected (a volume that contains injected data from multiple sources)\n") + for _, source := range projected.Sources { + if source.Secret != nil { + w.Write(LEVEL_2, "SecretName:\t%v\n"+ + " SecretOptionalName:\t%v\n", + source.Secret.Name, source.Secret.Optional) + } else if source.DownwardAPI != nil { + w.Write(LEVEL_2, "DownwardAPI:\ttrue\n") + } else if source.ConfigMap != nil { + w.Write(LEVEL_2, "ConfigMapName:\t%v\n"+ + " ConfigMapOptional:\t%v\n", + source.ConfigMap.Name, source.ConfigMap.Optional) + } else if source.ServiceAccountToken != nil { + w.Write(LEVEL_2, "TokenExpirationSeconds:\t%v\n", + source.ServiceAccountToken.ExpirationSeconds) + } + } +} + +func printNFSVolumeSource(nfs *corev1.NFSVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tNFS (an NFS mount that lasts the lifetime of a pod)\n"+ " Server:\t%v\n"+ " Path:\t%v\n"+ @@ -832,7 +895,7 @@ func printNFSVolumeSource(nfs *api.NFSVolumeSource, w PrefixWriter) { nfs.Server, nfs.Path, nfs.ReadOnly) } -func printQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, w PrefixWriter) { +func printQuobyteVolumeSource(quobyte *corev1.QuobyteVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tQuobyte (a Quobyte mount on the host that shares a pod's lifetime)\n"+ " Registry:\t%v\n"+ " Volume:\t%v\n"+ @@ -840,13 +903,13 @@ func printQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, w PrefixWriter) quobyte.Registry, quobyte.Volume, quobyte.ReadOnly) } -func printPortworxVolumeSource(pwxVolume *api.PortworxVolumeSource, w PrefixWriter) { +func printPortworxVolumeSource(pwxVolume *corev1.PortworxVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tPortworxVolume (a Portworx Volume resource)\n"+ " VolumeID:\t%v\n", pwxVolume.VolumeID) } -func printISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, w PrefixWriter) { +func printISCSIVolumeSource(iscsi *corev1.ISCSIVolumeSource, w PrefixWriter) { initiator := "" if iscsi.InitiatorName != nil { initiator = *iscsi.InitiatorName @@ -866,7 +929,7 @@ func printISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, w PrefixWriter) { iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiator) } -func printISCSIPersistentVolumeSource(iscsi *api.ISCSIPersistentVolumeSource, w PrefixWriter) { +func printISCSIPersistentVolumeSource(iscsi *corev1.ISCSIPersistentVolumeSource, w PrefixWriter) { initiatorName := "" if iscsi.InitiatorName != nil { initiatorName = *iscsi.InitiatorName @@ -886,7 +949,7 @@ func printISCSIPersistentVolumeSource(iscsi *api.ISCSIPersistentVolumeSource, w iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiatorName) } -func printGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, w PrefixWriter) { +func printGlusterfsVolumeSource(glusterfs *corev1.GlusterfsVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ " EndpointsName:\t%v\n"+ " Path:\t%v\n"+ @@ -894,14 +957,23 @@ func printGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, w PrefixWr glusterfs.EndpointsName, glusterfs.Path, glusterfs.ReadOnly) } -func printPersistentVolumeClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, w PrefixWriter) 
{ +func printGlusterfsPersistentVolumeSource(glusterfs *corev1.GlusterfsPersistentVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ + " EndpointsName:\t%v\n"+ + " EndpointsNamespace:\t%v\n"+ + " Path:\t%v\n"+ + " ReadOnly:\t%v\n", + glusterfs.EndpointsName, glusterfs.EndpointsNamespace, glusterfs.Path, glusterfs.ReadOnly) +} + +func printPersistentVolumeClaimVolumeSource(claim *corev1.PersistentVolumeClaimVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tPersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n"+ " ClaimName:\t%v\n"+ " ReadOnly:\t%v\n", claim.ClaimName, claim.ReadOnly) } -func printRBDVolumeSource(rbd *api.RBDVolumeSource, w PrefixWriter) { +func printRBDVolumeSource(rbd *corev1.RBDVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+ " CephMonitors:\t%v\n"+ " RBDImage:\t%v\n"+ @@ -914,7 +986,7 @@ func printRBDVolumeSource(rbd *api.RBDVolumeSource, w PrefixWriter) { rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) } -func printRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, w PrefixWriter) { +func printRBDPersistentVolumeSource(rbd *corev1.RBDPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+ " CephMonitors:\t%v\n"+ " RBDImage:\t%v\n"+ @@ -927,7 +999,7 @@ func printRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, w Prefix rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) } -func printDownwardAPIVolumeSource(d *api.DownwardAPIVolumeSource, w PrefixWriter) { +func printDownwardAPIVolumeSource(d *corev1.DownwardAPIVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tDownwardAPI (a volume populated by information about the pod)\n Items:\n") for _, mapping := range d.Items { if mapping.FieldRef != nil { @@ -939,7 +1011,7 @@ func printDownwardAPIVolumeSource(d *api.DownwardAPIVolumeSource, w PrefixWriter } } -func printAzureDiskVolumeSource(d *api.AzureDiskVolumeSource, w PrefixWriter) { +func printAzureDiskVolumeSource(d *corev1.AzureDiskVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tAzureDisk (an Azure Data Disk mount on the host and bind mount to the pod)\n"+ " DiskName:\t%v\n"+ " DiskURI:\t%v\n"+ @@ -950,7 +1022,7 @@ func printAzureDiskVolumeSource(d *api.AzureDiskVolumeSource, w PrefixWriter) { d.DiskName, d.DataDiskURI, *d.Kind, *d.FSType, *d.CachingMode, *d.ReadOnly) } -func printVsphereVolumeSource(vsphere *api.VsphereVirtualDiskVolumeSource, w PrefixWriter) { +func printVsphereVolumeSource(vsphere *corev1.VsphereVirtualDiskVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tvSphereVolume (a Persistent Disk resource in vSphere)\n"+ " VolumePath:\t%v\n"+ " FSType:\t%v\n"+ @@ -958,14 +1030,14 @@ func printVsphereVolumeSource(vsphere *api.VsphereVirtualDiskVolumeSource, w Pre vsphere.VolumePath, vsphere.FSType, vsphere.StoragePolicyName) } -func printPhotonPersistentDiskVolumeSource(photon *api.PhotonPersistentDiskVolumeSource, w PrefixWriter) { +func printPhotonPersistentDiskVolumeSource(photon *corev1.PhotonPersistentDiskVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tPhotonPersistentDisk (a Persistent Disk resource in photon platform)\n"+ " PdID:\t%v\n"+ " FSType:\t%v\n", photon.PdID, photon.FSType) } 
-func printCinderVolumeSource(cinder *api.CinderVolumeSource, w PrefixWriter) { +func printCinderVolumeSource(cinder *corev1.CinderVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+ " VolumeID:\t%v\n"+ " FSType:\t%v\n"+ @@ -974,7 +1046,7 @@ func printCinderVolumeSource(cinder *api.CinderVolumeSource, w PrefixWriter) { cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef) } -func printCinderPersistentVolumeSource(cinder *api.CinderPersistentVolumeSource, w PrefixWriter) { +func printCinderPersistentVolumeSource(cinder *corev1.CinderPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+ " VolumeID:\t%v\n"+ " FSType:\t%v\n"+ @@ -983,7 +1055,7 @@ func printCinderPersistentVolumeSource(cinder *api.CinderPersistentVolumeSource, cinder.VolumeID, cinder.SecretRef, cinder.FSType, cinder.ReadOnly, cinder.SecretRef) } -func printScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, w PrefixWriter) { +func printScaleIOVolumeSource(sio *corev1.ScaleIOVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+ " Gateway:\t%v\n"+ " System:\t%v\n"+ @@ -996,7 +1068,7 @@ func printScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, w PrefixWriter) { sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, sio.FSType, sio.ReadOnly) } -func printScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSource, w PrefixWriter) { +func printScaleIOPersistentVolumeSource(sio *corev1.ScaleIOPersistentVolumeSource, w PrefixWriter) { var secretNS, secretName string if sio.SecretRef != nil { secretName = sio.SecretRef.Name @@ -1016,13 +1088,13 @@ func printScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSource, sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, secretName, secretNS, sio.FSType, sio.ReadOnly) } -func printLocalVolumeSource(ls *api.LocalVolumeSource, w PrefixWriter) { +func printLocalVolumeSource(ls *corev1.LocalVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tLocalVolume (a persistent volume backed by local storage on a node)\n"+ " Path:\t%v\n", ls.Path) } -func printCephFSVolumeSource(cephfs *api.CephFSVolumeSource, w PrefixWriter) { +func printCephFSVolumeSource(cephfs *corev1.CephFSVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+ " Monitors:\t%v\n"+ " Path:\t%v\n"+ @@ -1033,7 +1105,7 @@ func printCephFSVolumeSource(cephfs *api.CephFSVolumeSource, w PrefixWriter) { cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly) } -func printCephFSPersistentVolumeSource(cephfs *api.CephFSPersistentVolumeSource, w PrefixWriter) { +func printCephFSPersistentVolumeSource(cephfs *corev1.CephFSPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+ " Monitors:\t%v\n"+ " Path:\t%v\n"+ @@ -1044,7 +1116,7 @@ func printCephFSPersistentVolumeSource(cephfs *api.CephFSPersistentVolumeSource, cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly) } -func printStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, w PrefixWriter) { +func printStorageOSVolumeSource(storageos *corev1.StorageOSVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS 
Persistent Disk resource)\n"+ " VolumeName:\t%v\n"+ " VolumeNamespace:\t%v\n"+ @@ -1053,7 +1125,7 @@ func printStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, w PrefixWr storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly) } -func printStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentVolumeSource, w PrefixWriter) { +func printStorageOSPersistentVolumeSource(storageos *corev1.StorageOSPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n"+ " VolumeName:\t%v\n"+ " VolumeNamespace:\t%v\n"+ @@ -1062,7 +1134,7 @@ func printStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentVolu storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly) } -func printFCVolumeSource(fc *api.FCVolumeSource, w PrefixWriter) { +func printFCVolumeSource(fc *corev1.FCVolumeSource, w PrefixWriter) { lun := "" if fc.Lun != nil { lun = strconv.Itoa(int(*fc.Lun)) @@ -1075,7 +1147,7 @@ func printFCVolumeSource(fc *api.FCVolumeSource, w PrefixWriter) { strings.Join(fc.TargetWWNs, ", "), lun, fc.FSType, fc.ReadOnly) } -func printAzureFileVolumeSource(azureFile *api.AzureFileVolumeSource, w PrefixWriter) { +func printAzureFileVolumeSource(azureFile *corev1.AzureFileVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n"+ " SecretName:\t%v\n"+ " ShareName:\t%v\n"+ @@ -1083,7 +1155,7 @@ func printAzureFileVolumeSource(azureFile *api.AzureFileVolumeSource, w PrefixWr azureFile.SecretName, azureFile.ShareName, azureFile.ReadOnly) } -func printAzureFilePersistentVolumeSource(azureFile *api.AzureFilePersistentVolumeSource, w PrefixWriter) { +func printAzureFilePersistentVolumeSource(azureFile *corev1.AzureFilePersistentVolumeSource, w PrefixWriter) { ns := "" if azureFile.SecretNamespace != nil { ns = *azureFile.SecretNamespace @@ -1096,7 +1168,7 @@ func printAzureFilePersistentVolumeSource(azureFile *api.AzureFilePersistentVolu azureFile.SecretName, ns, azureFile.ShareName, azureFile.ReadOnly) } -func printFlexPersistentVolumeSource(flex *api.FlexPersistentVolumeSource, w PrefixWriter) { +func printFlexPersistentVolumeSource(flex *corev1.FlexPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+ " Driver:\t%v\n"+ " FSType:\t%v\n"+ @@ -1106,7 +1178,7 @@ func printFlexPersistentVolumeSource(flex *api.FlexPersistentVolumeSource, w Pre flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) } -func printFlexVolumeSource(flex *api.FlexVolumeSource, w PrefixWriter) { +func printFlexVolumeSource(flex *corev1.FlexVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+ " Driver:\t%v\n"+ " FSType:\t%v\n"+ @@ -1116,14 +1188,14 @@ func printFlexVolumeSource(flex *api.FlexVolumeSource, w PrefixWriter) { flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) } -func printFlockerVolumeSource(flocker *api.FlockerVolumeSource, w PrefixWriter) { +func printFlockerVolumeSource(flocker *corev1.FlockerVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tFlocker (a Flocker volume mounted by the Flocker agent)\n"+ " DatasetName:\t%v\n"+ " DatasetUUID:\t%v\n", flocker.DatasetName, flocker.DatasetUUID) } -func printCSIPersistentVolumeSource(csi 
*api.CSIPersistentVolumeSource, w PrefixWriter) { +func printCSIPersistentVolumeSource(csi *corev1.CSIPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+ " Driver:\t%v\n"+ " VolumeHandle:\t%v\n"+ @@ -1177,7 +1249,7 @@ type PersistentVolumeDescriber struct { clientset.Interface } -func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Core().PersistentVolumes() pv, err := c.Get(name, metav1.GetOptions{}) @@ -1185,15 +1257,15 @@ func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSe return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, pv) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, pv) } return describePersistentVolume(pv, events) } -func printVolumeNodeAffinity(w PrefixWriter, affinity *api.VolumeNodeAffinity) { +func printVolumeNodeAffinity(w PrefixWriter, affinity *corev1.VolumeNodeAffinity) { w.Write(LEVEL_0, "Node Affinity:\t") if affinity == nil || affinity.Required == nil { w.WriteLine("") @@ -1215,7 +1287,7 @@ func printVolumeNodeAffinity(w PrefixWriter, affinity *api.VolumeNodeAffinity) { } // printLabelsMultiline prints multiple labels with a user-defined alignment. -func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []api.NodeSelectorRequirement) { +func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.NodeSelectorRequirement) { w.Write(indentLevel, "%s:%s", title, innerIndent) if len(reqs) == 0 { @@ -1235,14 +1307,14 @@ func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, } } -func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) (string, error) { +func describePersistentVolume(pv *corev1.PersistentVolume, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", pv.Name) printLabelsMultiline(w, "Labels", pv.ObjectMeta.Labels) printAnnotationsMultiline(w, "Annotations", pv.ObjectMeta.Annotations) w.Write(LEVEL_0, "Finalizers:\t%v\n", pv.ObjectMeta.Finalizers) - w.Write(LEVEL_0, "StorageClass:\t%s\n", helper.GetPersistentVolumeClass(pv)) + w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClass(pv)) if pv.ObjectMeta.DeletionTimestamp != nil { w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampUntil(*pv.ObjectMeta.DeletionTimestamp)) } else { @@ -1254,11 +1326,11 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) ( w.Write(LEVEL_0, "Claim:\t%s\n", "") } w.Write(LEVEL_0, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy) - w.Write(LEVEL_0, "Access Modes:\t%s\n", helper.GetAccessModesAsString(pv.Spec.AccessModes)) + w.Write(LEVEL_0, "Access Modes:\t%s\n", storageutil.GetAccessModesAsString(pv.Spec.AccessModes)) if pv.Spec.VolumeMode != nil { w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pv.Spec.VolumeMode) } - storage := pv.Spec.Capacity[api.ResourceStorage] + storage := pv.Spec.Capacity[corev1.ResourceStorage] w.Write(LEVEL_0, "Capacity:\t%s\n", storage.String()) 
printVolumeNodeAffinity(w, pv.Spec.NodeAffinity) w.Write(LEVEL_0, "Message:\t%s\n", pv.Status.Message) @@ -1276,7 +1348,7 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) ( case pv.Spec.ISCSI != nil: printISCSIPersistentVolumeSource(pv.Spec.ISCSI, w) case pv.Spec.Glusterfs != nil: - printGlusterfsVolumeSource(pv.Spec.Glusterfs, w) + printGlusterfsPersistentVolumeSource(pv.Spec.Glusterfs, w) case pv.Spec.RBD != nil: printRBDPersistentVolumeSource(pv.Spec.RBD, w) case pv.Spec.Quobyte != nil: @@ -1325,7 +1397,7 @@ type PersistentVolumeClaimDescriber struct { clientset.Interface } -func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Core().PersistentVolumeClaims(namespace) pvc, err := c.Get(name, metav1.GetOptions{}) @@ -1340,18 +1412,18 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri return "", err } - events, _ := d.Core().Events(namespace).Search(legacyscheme.Scheme, pvc) + events, _ := d.Core().Events(namespace).Search(scheme.Scheme, pvc) return describePersistentVolumeClaim(pvc, events, mountPods) } -func getMountPods(c coreclient.PodInterface, pvcName string) ([]api.Pod, error) { +func getMountPods(c corev1client.PodInterface, pvcName string) ([]corev1.Pod, error) { nsPods, err := c.List(metav1.ListOptions{}) if err != nil { - return []api.Pod{}, err + return []corev1.Pod{}, err } - var pods []api.Pod + var pods []corev1.Pod for _, pod := range nsPods.Items { pvcs := getPvcs(pod.Spec.Volumes) @@ -1366,8 +1438,8 @@ func getMountPods(c coreclient.PodInterface, pvcName string) ([]api.Pod, error) return pods, nil } -func getPvcs(volumes []api.Volume) []api.Volume { - var pvcs []api.Volume +func getPvcs(volumes []corev1.Volume) []corev1.Volume { + var pvcs []corev1.Volume for _, volume := range volumes { if volume.VolumeSource.PersistentVolumeClaim != nil { @@ -1378,12 +1450,12 @@ func getPvcs(volumes []api.Volume) []api.Volume { return pvcs } -func describePersistentVolumeClaim(pvc *api.PersistentVolumeClaim, events *api.EventList, mountPods []api.Pod) (string, error) { +func describePersistentVolumeClaim(pvc *corev1.PersistentVolumeClaim, events *corev1.EventList, mountPods []corev1.Pod) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", pvc.Name) w.Write(LEVEL_0, "Namespace:\t%s\n", pvc.Namespace) - w.Write(LEVEL_0, "StorageClass:\t%s\n", helper.GetPersistentVolumeClaimClass(pvc)) + w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(pvc)) if pvc.ObjectMeta.DeletionTimestamp != nil { w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampUntil(*pvc.ObjectMeta.DeletionTimestamp)) } else { @@ -1393,12 +1465,12 @@ func describePersistentVolumeClaim(pvc *api.PersistentVolumeClaim, events *api.E printLabelsMultiline(w, "Labels", pvc.Labels) printAnnotationsMultiline(w, "Annotations", pvc.Annotations) w.Write(LEVEL_0, "Finalizers:\t%v\n", pvc.ObjectMeta.Finalizers) - storage := pvc.Spec.Resources.Requests[api.ResourceStorage] + storage := pvc.Spec.Resources.Requests[corev1.ResourceStorage] capacity := "" accessModes := "" if pvc.Spec.VolumeName != "" { - accessModes = helper.GetAccessModesAsString(pvc.Status.AccessModes) - storage = pvc.Status.Capacity[api.ResourceStorage] + 
accessModes = storageutil.GetAccessModesAsString(pvc.Status.AccessModes) + storage = pvc.Status.Capacity[corev1.ResourceStorage] capacity = storage.String() } w.Write(LEVEL_0, "Capacity:\t%s\n", capacity) @@ -1430,9 +1502,9 @@ func describePersistentVolumeClaim(pvc *api.PersistentVolumeClaim, events *api.E }) } -func describeContainers(label string, containers []api.Container, containerStatuses []api.ContainerStatus, +func describeContainers(label string, containers []corev1.Container, containerStatuses []corev1.ContainerStatus, resolverFn EnvVarResolverFunc, w PrefixWriter, space string) { - statuses := map[string]api.ContainerStatus{} + statuses := map[string]corev1.ContainerStatus{} for _, status := range containerStatuses { statuses[status.Name] = status } @@ -1456,7 +1528,7 @@ func describeContainers(label string, containers []api.Container, containerStatu } } -func describeContainersLabel(containers []api.Container, label, space string, w PrefixWriter) { +func describeContainersLabel(containers []corev1.Container, label, space string, w PrefixWriter) { none := "" if len(containers) == 0 { none = " " @@ -1464,7 +1536,7 @@ func describeContainersLabel(containers []api.Container, label, space string, w w.Write(LEVEL_0, "%s%s:%s\n", space, label, none) } -func describeContainerBasicInfo(container api.Container, status api.ContainerStatus, ok bool, space string, w PrefixWriter) { +func describeContainerBasicInfo(container corev1.Container, status corev1.ContainerStatus, ok bool, space string, w PrefixWriter) { nameIndent := "" if len(space) > 0 { nameIndent = " " @@ -1491,7 +1563,7 @@ func describeContainerBasicInfo(container api.Container, status api.ContainerSta } } -func describeContainerPorts(cPorts []api.ContainerPort) string { +func describeContainerPorts(cPorts []corev1.ContainerPort) string { ports := make([]string, 0, len(cPorts)) for _, cPort := range cPorts { ports = append(ports, fmt.Sprintf("%d/%s", cPort.ContainerPort, cPort.Protocol)) @@ -1499,7 +1571,7 @@ func describeContainerPorts(cPorts []api.ContainerPort) string { return strings.Join(ports, ", ") } -func describeContainerHostPorts(cPorts []api.ContainerPort) string { +func describeContainerHostPorts(cPorts []corev1.ContainerPort) string { ports := make([]string, 0, len(cPorts)) for _, cPort := range cPorts { ports = append(ports, fmt.Sprintf("%d/%s", cPort.HostPort, cPort.Protocol)) @@ -1507,7 +1579,7 @@ func describeContainerHostPorts(cPorts []api.ContainerPort) string { return strings.Join(ports, ", ") } -func describeContainerCommand(container api.Container, w PrefixWriter) { +func describeContainerCommand(container corev1.Container, w PrefixWriter) { if len(container.Command) > 0 { w.Write(LEVEL_2, "Command:\n") for _, c := range container.Command { @@ -1526,7 +1598,7 @@ func describeContainerCommand(container api.Container, w PrefixWriter) { } } -func describeContainerResource(container api.Container, w PrefixWriter) { +func describeContainerResource(container corev1.Container, w PrefixWriter) { resources := container.Resources if len(resources.Limits) > 0 { w.Write(LEVEL_2, "Limits:\n") @@ -1545,7 +1617,7 @@ func describeContainerResource(container api.Container, w PrefixWriter) { } } -func describeContainerState(status api.ContainerStatus, w PrefixWriter) { +func describeContainerState(status corev1.ContainerStatus, w PrefixWriter) { describeStatus("State", status.State, w) if status.LastTerminationState.Terminated != nil { describeStatus("Last State", status.LastTerminationState, w) @@ -1554,7 +1626,7 @@ func 
describeContainerState(status api.ContainerStatus, w PrefixWriter) { w.Write(LEVEL_2, "Restart Count:\t%d\n", status.RestartCount) } -func describeContainerProbe(container api.Container, w PrefixWriter) { +func describeContainerProbe(container corev1.Container, w PrefixWriter) { if container.LivenessProbe != nil { probe := DescribeProbe(container.LivenessProbe) w.Write(LEVEL_2, "Liveness:\t%s\n", probe) @@ -1565,7 +1637,7 @@ func describeContainerProbe(container api.Container, w PrefixWriter) { } } -func describeContainerVolumes(container api.Container, w PrefixWriter) { +func describeContainerVolumes(container corev1.Container, w PrefixWriter) { // Show volumeMounts none := "" if len(container.VolumeMounts) == 0 { @@ -1595,7 +1667,7 @@ func describeContainerVolumes(container api.Container, w PrefixWriter) { } } -func describeContainerEnvVars(container api.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) { +func describeContainerEnvVars(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) { none := "" if len(container.Env) == 0 { none = "\t" @@ -1641,7 +1713,7 @@ func describeContainerEnvVars(container api.Container, resolverFn EnvVarResolver } } -func describeContainerEnvFrom(container api.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) { +func describeContainerEnvFrom(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) { none := "" if len(container.EnvFrom) == 0 { none = "\t" @@ -1670,7 +1742,7 @@ func describeContainerEnvFrom(container api.Container, resolverFn EnvVarResolver } // DescribeProbe is exported for consumers in other API groups that have probes -func DescribeProbe(probe *api.Probe) string { +func DescribeProbe(probe *corev1.Probe) string { attrs := fmt.Sprintf("delay=%ds timeout=%ds period=%ds #success=%d #failure=%d", probe.InitialDelaySeconds, probe.TimeoutSeconds, probe.PeriodSeconds, probe.SuccessThreshold, probe.FailureThreshold) switch { case probe.Exec != nil: @@ -1691,17 +1763,17 @@ func DescribeProbe(probe *api.Probe) string { return fmt.Sprintf("unknown %s", attrs) } -type EnvVarResolverFunc func(e api.EnvVar) string +type EnvVarResolverFunc func(e corev1.EnvVar) string // EnvValueFrom is exported for use by describers in other packages -func EnvValueRetriever(pod *api.Pod) EnvVarResolverFunc { - return func(e api.EnvVar) string { +func EnvValueRetriever(pod *corev1.Pod) EnvVarResolverFunc { + return func(e corev1.EnvVar) string { gv, err := schema.ParseGroupVersion(e.ValueFrom.FieldRef.APIVersion) if err != nil { return "" } gvk := gv.WithKind("Pod") - internalFieldPath, _, err := legacyscheme.Scheme.ConvertFieldLabel(gvk, e.ValueFrom.FieldRef.FieldPath, "") + internalFieldPath, _, err := scheme.Scheme.ConvertFieldLabel(gvk, e.ValueFrom.FieldRef.FieldPath, "") if err != nil { return "" // pod validation should catch this on create } @@ -1715,7 +1787,7 @@ func EnvValueRetriever(pod *api.Pod) EnvVarResolverFunc { } } -func describeStatus(stateName string, state api.ContainerState, w PrefixWriter) { +func describeStatus(stateName string, state corev1.ContainerState, w PrefixWriter) { switch { case state.Running != nil: w.Write(LEVEL_2, "%s:\tRunning\n", stateName) @@ -1744,7 +1816,7 @@ func describeStatus(stateName string, state api.ContainerState, w PrefixWriter) } } -func describeVolumeClaimTemplates(templates []api.PersistentVolumeClaim, w PrefixWriter) { +func describeVolumeClaimTemplates(templates []corev1.PersistentVolumeClaim, w PrefixWriter) { if len(templates) == 0 { w.Write(LEVEL_0, "Volume 
Claims:\t\n") return @@ -1752,10 +1824,10 @@ func describeVolumeClaimTemplates(templates []api.PersistentVolumeClaim, w Prefi w.Write(LEVEL_0, "Volume Claims:\n") for _, pvc := range templates { w.Write(LEVEL_1, "Name:\t%s\n", pvc.Name) - w.Write(LEVEL_1, "StorageClass:\t%s\n", helper.GetPersistentVolumeClaimClass(&pvc)) + w.Write(LEVEL_1, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(&pvc)) printLabelsMultilineWithIndent(w, " ", "Labels", "\t", pvc.Labels, sets.NewString()) printLabelsMultilineWithIndent(w, " ", "Annotations", "\t", pvc.Annotations, sets.NewString()) - if capacity, ok := pvc.Spec.Resources.Requests[api.ResourceStorage]; ok { + if capacity, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok { w.Write(LEVEL_1, "Capacity:\t%s\n", capacity.String()) } else { w.Write(LEVEL_1, "Capacity:\t%s\n", "") @@ -1786,7 +1858,7 @@ type ReplicationControllerDescriber struct { clientset.Interface } -func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { rc := d.Core().ReplicationControllers(namespace) pc := d.Core().Pods(namespace) @@ -1800,15 +1872,15 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string, descri return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, controller) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, controller) } return describeReplicationController(controller, events, running, waiting, succeeded, failed) } -func describeReplicationController(controller *api.ReplicationController, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { +func describeReplicationController(controller *corev1.ReplicationController, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", controller.Name) @@ -1816,7 +1888,7 @@ func describeReplicationController(controller *api.ReplicationController, events w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector)) printLabelsMultiline(w, "Labels", controller.Labels) printAnnotationsMultiline(w, "Annotations", controller.Annotations) - w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, controller.Spec.Replicas) + w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, *controller.Spec.Replicas) w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) DescribePodTemplate(controller.Spec.Template, w) if len(controller.Status.Conditions) > 0 { @@ -1833,7 +1905,7 @@ func describeReplicationController(controller *api.ReplicationController, events }) } -func DescribePodTemplate(template *api.PodTemplateSpec, w PrefixWriter) { +func DescribePodTemplate(template *corev1.PodTemplateSpec, w PrefixWriter) { w.Write(LEVEL_0, "Pod Template:\n") if template == nil { w.Write(LEVEL_1, "") @@ -1858,8 +1930,8 @@ type ReplicaSetDescriber struct { clientset.Interface } -func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - rsc := d.Extensions().ReplicaSets(namespace) 
+func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { + rsc := d.Apps().ReplicaSets(namespace) pc := d.Core().Pods(namespace) rs, err := rsc.Get(name, metav1.GetOptions{}) @@ -1874,15 +1946,15 @@ func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector, rs.UID) - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, rs) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, rs) } return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr) } -func describeReplicaSet(rs *extensions.ReplicaSet, events *api.EventList, running, waiting, succeeded, failed int, getPodErr error) (string, error) { +func describeReplicaSet(rs *appsv1.ReplicaSet, events *corev1.EventList, running, waiting, succeeded, failed int, getPodErr error) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", rs.Name) @@ -1893,7 +1965,7 @@ func describeReplicaSet(rs *extensions.ReplicaSet, events *api.EventList, runnin if controlledBy := printController(rs); len(controlledBy) > 0 { w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy) } - w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, rs.Spec.Replicas) + w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, *rs.Spec.Replicas) w.Write(LEVEL_0, "Pods Status:\t") if getPodErr != nil { w.Write(LEVEL_0, "error in fetching pods: %s\n", getPodErr) @@ -1920,21 +1992,21 @@ type JobDescriber struct { clientset.Interface } -func (d *JobDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *JobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { job, err := d.Batch().Jobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, job) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, job) } return describeJob(job, events) } -func describeJob(job *batch.Job, events *api.EventList) (string, error) { +func describeJob(job *batchv1.Job, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", job.Name) @@ -1975,30 +2047,23 @@ func describeJob(job *batch.Job, events *api.EventList) (string, error) { // CronJobDescriber generates information about a cron job and the jobs it has created. 
type CronJobDescriber struct { - clientset.Interface - external externalclient.Interface + client clientset.Interface } -func (d *CronJobDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - cronJob, err := d.external.BatchV1beta1().CronJobs(namespace).Get(name, metav1.GetOptions{}) +func (d *CronJobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { + cronJob, err := d.client.BatchV1beta1().CronJobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, cronJob) - } - - internalCronJob := &batch.CronJob{} - if err := legacyscheme.Scheme.Convert(cronJob, internalCronJob, nil); err != nil { - return "", err + events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, cronJob) } - - return describeCronJob(internalCronJob, events) + return describeCronJob(cronJob, events) } -func describeCronJob(cronJob *batch.CronJob, events *api.EventList) (string, error) { +func describeCronJob(cronJob *batchv1beta1.CronJob, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", cronJob.Name) @@ -2027,7 +2092,7 @@ func describeCronJob(cronJob *batch.CronJob, events *api.EventList) (string, err }) } -func describeJobTemplate(jobTemplate batch.JobTemplateSpec, w PrefixWriter) { +func describeJobTemplate(jobTemplate batchv1beta1.JobTemplateSpec, w PrefixWriter) { if jobTemplate.Spec.Selector != nil { selector, _ := metav1.LabelSelectorAsSelector(jobTemplate.Spec.Selector) w.Write(LEVEL_0, "Selector:\t%s\n", selector) @@ -2050,7 +2115,7 @@ func describeJobTemplate(jobTemplate batch.JobTemplateSpec, w PrefixWriter) { DescribePodTemplate(&jobTemplate.Spec.Template, w) } -func printActiveJobs(w PrefixWriter, title string, jobs []api.ObjectReference) { +func printActiveJobs(w PrefixWriter, title string, jobs []corev1.ObjectReference) { w.Write(LEVEL_0, "%s:\t", title) if len(jobs) == 0 { w.WriteLine("") @@ -2071,8 +2136,8 @@ type DaemonSetDescriber struct { clientset.Interface } -func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - dc := d.Extensions().DaemonSets(namespace) +func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { + dc := d.Apps().DaemonSets(namespace) pc := d.Core().Pods(namespace) daemon, err := dc.Get(name, metav1.GetOptions{}) @@ -2089,15 +2154,15 @@ func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, daemon) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, daemon) } return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) } -func describeDaemonSet(daemon *extensions.DaemonSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { +func describeDaemonSet(daemon *appsv1.DaemonSet, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", daemon.Name) @@ -2129,7 
+2194,7 @@ type SecretDescriber struct { clientset.Interface } -func (d *SecretDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *SecretDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Core().Secrets(namespace) secret, err := c.Get(name, metav1.GetOptions{}) @@ -2140,13 +2205,13 @@ func (d *SecretDescriber) Describe(namespace, name string, describerSettings pri return describeSecret(secret) } -func describeSecret(secret *api.Secret) (string, error) { +func describeSecret(secret *corev1.Secret) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", secret.Name) w.Write(LEVEL_0, "Namespace:\t%s\n", secret.Namespace) printLabelsMultiline(w, "Labels", secret.Labels) - skipAnnotations := sets.NewString(api.LastAppliedConfigAnnotation) + skipAnnotations := sets.NewString(corev1.LastAppliedConfigAnnotation) printAnnotationsMultilineWithFilter(w, "Annotations", secret.Annotations, skipAnnotations) w.Write(LEVEL_0, "\nType:\t%s\n", secret.Type) @@ -2154,7 +2219,7 @@ func describeSecret(secret *api.Secret) (string, error) { w.Write(LEVEL_0, "\nData\n====\n") for k, v := range secret.Data { switch { - case k == api.ServiceAccountTokenKey && secret.Type == api.SecretTypeServiceAccountToken: + case k == corev1.ServiceAccountTokenKey && secret.Type == corev1.SecretTypeServiceAccountToken: w.Write(LEVEL_0, "%s:\t%s\n", k, string(v)) default: w.Write(LEVEL_0, "%s:\t%d bytes\n", k, len(v)) @@ -2169,7 +2234,7 @@ type IngressDescriber struct { clientset.Interface } -func (i *IngressDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (i *IngressDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := i.Extensions().Ingresses(namespace) ing, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -2178,7 +2243,7 @@ func (i *IngressDescriber) Describe(namespace, name string, describerSettings pr return i.describeIngress(ing, describerSettings) } -func (i *IngressDescriber) describeBackend(ns string, backend *extensions.IngressBackend) string { +func (i *IngressDescriber) describeBackend(ns string, backend *extensionsv1beta1.IngressBackend) string { endpoints, _ := i.Core().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{}) service, _ := i.Core().Services(ns).Get(backend.ServiceName, metav1.GetOptions{}) spName := "" @@ -2198,7 +2263,7 @@ func (i *IngressDescriber) describeBackend(ns string, backend *extensions.Ingres return formatEndpoints(endpoints, sets.NewString(spName)) } -func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSettings printers.DescriberSettings) (string, error) { +func (i *IngressDescriber) describeIngress(ing *extensionsv1beta1.Ingress, describerSettings describe.DescriberSettings) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%v\n", ing.Name) @@ -2209,7 +2274,7 @@ func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSet if def == nil { // Ingresses that don't specify a default backend inherit the // default backend in the kube-system namespace. 
- def = &extensions.IngressBackend{ + def = &extensionsv1beta1.IngressBackend{ ServiceName: "default-http-backend", ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80}, } @@ -2242,7 +2307,7 @@ func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSet describeIngressAnnotations(w, ing.Annotations) if describerSettings.ShowEvents { - events, _ := i.Core().Events(ing.Namespace).Search(legacyscheme.Scheme, ing) + events, _ := i.Core().Events(ing.Namespace).Search(scheme.Scheme, ing) if events != nil { DescribeEvents(events, w) } @@ -2251,7 +2316,7 @@ func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSet }) } -func describeIngressTLS(w PrefixWriter, ingTLS []extensions.IngressTLS) { +func describeIngressTLS(w PrefixWriter, ingTLS []extensionsv1beta1.IngressTLS) { w.Write(LEVEL_0, "TLS:\n") for _, t := range ingTLS { if t.SecretName == "" { @@ -2277,7 +2342,7 @@ type ServiceDescriber struct { clientset.Interface } -func (d *ServiceDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ServiceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Core().Services(namespace) service, err := c.Get(name, metav1.GetOptions{}) @@ -2286,14 +2351,14 @@ func (d *ServiceDescriber) Describe(namespace, name string, describerSettings pr } endpoints, _ := d.Core().Endpoints(namespace).Get(name, metav1.GetOptions{}) - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, service) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, service) } return describeService(service, endpoints, events) } -func buildIngressString(ingress []api.LoadBalancerIngress) string { +func buildIngressString(ingress []corev1.LoadBalancerIngress) string { var buffer bytes.Buffer for i := range ingress { @@ -2309,9 +2374,9 @@ func buildIngressString(ingress []api.LoadBalancerIngress) string { return buffer.String() } -func describeService(service *api.Service, endpoints *api.Endpoints, events *api.EventList) (string, error) { +func describeService(service *corev1.Service, endpoints *corev1.Endpoints, events *corev1.EventList) (string, error) { if endpoints == nil { - endpoints = &api.Endpoints{} + endpoints = &corev1.Endpoints{} } return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) @@ -2375,7 +2440,7 @@ type EndpointsDescriber struct { clientset.Interface } -func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Core().Endpoints(namespace) ep, err := c.Get(name, metav1.GetOptions{}) @@ -2383,15 +2448,15 @@ func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, ep) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, ep) } return describeEndpoints(ep, events) } -func describeEndpoints(ep *api.Endpoints, events *api.EventList) (string, error) { +func describeEndpoints(ep *corev1.Endpoints, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) 
w.Write(LEVEL_0, "Name:\t%s\n", ep.Name) @@ -2450,7 +2515,7 @@ type ServiceAccountDescriber struct { clientset.Interface } -func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Core().ServiceAccounts(namespace) serviceAccount, err := c.Get(name, metav1.GetOptions{}) @@ -2458,7 +2523,7 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett return "", err } - tokens := []api.Secret{} + tokens := []corev1.Secret{} // missingSecrets is the set of all secrets present in the // serviceAccount but not present in the set of existing secrets. @@ -2473,9 +2538,9 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett existingSecrets := sets.NewString() for _, s := range secrets.Items { - if s.Type == api.SecretTypeServiceAccountToken { - name, _ := s.Annotations[api.ServiceAccountNameKey] - uid, _ := s.Annotations[api.ServiceAccountUIDKey] + if s.Type == corev1.SecretTypeServiceAccountToken { + name, _ := s.Annotations[corev1.ServiceAccountNameKey] + uid, _ := s.Annotations[corev1.ServiceAccountUIDKey] if name == serviceAccount.Name && uid == string(serviceAccount.UID) { tokens = append(tokens, s) } @@ -2495,15 +2560,15 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett } } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, serviceAccount) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, serviceAccount) } return describeServiceAccount(serviceAccount, tokens, missingSecrets, events) } -func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret, missingSecrets sets.String, events *api.EventList) (string, error) { +func describeServiceAccount(serviceAccount *corev1.ServiceAccount, tokens []corev1.Secret, missingSecrets sets.String, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", serviceAccount.Name) @@ -2564,10 +2629,10 @@ func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Sec // RoleDescriber generates information about a node. type RoleDescriber struct { - externalclient.Interface + clientset.Interface } -func (d *RoleDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *RoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { role, err := d.Rbac().Roles(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2575,14 +2640,14 @@ func (d *RoleDescriber) Describe(namespace, name string, describerSettings print breakdownRules := []rbacv1.PolicyRule{} for _, rule := range role.Rules { - breakdownRules = append(breakdownRules, validation.BreakdownRule(rule)...) + breakdownRules = append(breakdownRules, rbac.BreakdownRule(rule)...) 
} - compactRules, err := validation.CompactRules(breakdownRules) + compactRules, err := rbac.CompactRules(breakdownRules) if err != nil { return "", err } - sort.Stable(rbacv1helpers.SortableRuleSlice(compactRules)) + sort.Stable(rbac.SortableRuleSlice(compactRules)) return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) @@ -2603,10 +2668,10 @@ func (d *RoleDescriber) Describe(namespace, name string, describerSettings print // ClusterRoleDescriber generates information about a node. type ClusterRoleDescriber struct { - externalclient.Interface + clientset.Interface } -func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { role, err := d.Rbac().ClusterRoles().Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2614,14 +2679,14 @@ func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSetting breakdownRules := []rbacv1.PolicyRule{} for _, rule := range role.Rules { - breakdownRules = append(breakdownRules, validation.BreakdownRule(rule)...) + breakdownRules = append(breakdownRules, rbac.BreakdownRule(rule)...) } - compactRules, err := validation.CompactRules(breakdownRules) + compactRules, err := rbac.CompactRules(breakdownRules) if err != nil { return "", err } - sort.Stable(rbacv1helpers.SortableRuleSlice(compactRules)) + sort.Stable(rbac.SortableRuleSlice(compactRules)) return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) @@ -2659,10 +2724,10 @@ func combineResourceGroup(resource, group []string) string { // RoleBindingDescriber generates information about a node. type RoleBindingDescriber struct { - externalclient.Interface + clientset.Interface } -func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { binding, err := d.Rbac().RoleBindings(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2691,10 +2756,10 @@ func (d *RoleBindingDescriber) Describe(namespace, name string, describerSetting // ClusterRoleBindingDescriber generates information about a node. 
type ClusterRoleBindingDescriber struct { - externalclient.Interface + clientset.Interface } -func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { binding, err := d.Rbac().ClusterRoleBindings().Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2726,14 +2791,14 @@ type NodeDescriber struct { clientset.Interface } -func (d *NodeDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *NodeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { mc := d.Core().Nodes() node, err := mc.Get(name, metav1.GetOptions{}) if err != nil { return "", err } - fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed)) + fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(corev1.PodSucceeded) + ",status.phase!=" + string(corev1.PodFailed)) if err != nil { return "", err } @@ -2748,21 +2813,21 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings print canViewPods = false } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - if ref, err := ref.GetReference(legacyscheme.Scheme, node); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", node, err) + if ref, err := reference.GetReference(scheme.Scheme, node); err != nil { + klog.Errorf("Unable to construct reference to '%#v': %v", node, err) } else { // TODO: We haven't decided the namespace for Node object yet. 
ref.UID = types.UID(ref.Name) - events, _ = d.Core().Events("").Search(legacyscheme.Scheme, ref) + events, _ = d.Core().Events("").Search(scheme.Scheme, ref) } } return describeNode(node, nodeNonTerminatedPodsList, events, canViewPods) } -func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events *api.EventList, canViewPods bool) (string, error) { +func describeNode(node *corev1.Node, nodeNonTerminatedPodsList *corev1.PodList, events *corev1.EventList, canViewPods bool) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", node.Name) @@ -2795,8 +2860,8 @@ func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events w.Write(LEVEL_1, "%s:\t%s\n", address.Type, address.Address) } - printResourceList := func(resourceList api.ResourceList) { - resources := make([]api.ResourceName, 0, len(resourceList)) + printResourceList := func(resourceList corev1.ResourceList) { + resources := make([]corev1.ResourceName, 0, len(resourceList)) for resource := range resourceList { resources = append(resources, resource) } @@ -2850,7 +2915,7 @@ type StatefulSetDescriber struct { client clientset.Interface } -func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { ps, err := p.client.Apps().StatefulSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2867,15 +2932,15 @@ func (p *StatefulSetDescriber) Describe(namespace, name string, describerSetting return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = p.client.Core().Events(namespace).Search(legacyscheme.Scheme, ps) + events, _ = p.client.Core().Events(namespace).Search(scheme.Scheme, ps) } return describeStatefulSet(ps, selector, events, running, waiting, succeeded, failed) } -func describeStatefulSet(ps *apps.StatefulSet, selector labels.Selector, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { +func describeStatefulSet(ps *appsv1.StatefulSet, selector labels.Selector, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", ps.ObjectMeta.Name) @@ -2888,7 +2953,7 @@ func describeStatefulSet(ps *apps.StatefulSet, selector labels.Selector, events w.Write(LEVEL_0, "Update Strategy:\t%s\n", ps.Spec.UpdateStrategy.Type) if ps.Spec.UpdateStrategy.RollingUpdate != nil { ru := ps.Spec.UpdateStrategy.RollingUpdate - if ru.Partition != 0 { + if ru.Partition != nil { w.Write(LEVEL_1, "Partition:\t%d\n", ru.Partition) } } @@ -2908,13 +2973,13 @@ type CertificateSigningRequestDescriber struct { client clientset.Interface } -func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { csr, err := p.client.Certificates().CertificateSigningRequests().Get(name, metav1.GetOptions{}) if err != nil { return "", err } - cr, err := certificates.ParseCSR(csr) + cr, err := certificate.ParseCSR(csr) if err != nil { return "", fmt.Errorf("Error parsing CSR: %v", err) } @@ -2923,15 +2988,15 @@ 
func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, de return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = p.client.Core().Events(namespace).Search(legacyscheme.Scheme, csr) + events, _ = p.client.Core().Events(namespace).Search(scheme.Scheme, csr) } return describeCertificateSigningRequest(csr, cr, status, events) } -func describeCertificateSigningRequest(csr *certificates.CertificateSigningRequest, cr *x509.CertificateRequest, status string, events *api.EventList) (string, error) { +func describeCertificateSigningRequest(csr *certificatesv1beta1.CertificateSigningRequest, cr *x509.CertificateRequest, status string, events *corev1.EventList) (string, error) { printListHelper := func(w PrefixWriter, prefix, name string, values []string) { if len(values) == 0 { return @@ -2985,21 +3050,21 @@ type HorizontalPodAutoscalerDescriber struct { client clientset.Interface } -func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - hpa, err := d.client.Autoscaling().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{}) +func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { + hpa, err := d.client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.client.Core().Events(namespace).Search(legacyscheme.Scheme, hpa) + events, _ = d.client.Core().Events(namespace).Search(scheme.Scheme, hpa) } return describeHorizontalPodAutoscaler(hpa, events, d) } -func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, events *api.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) { +func describeHorizontalPodAutoscaler(hpa *autoscalingv2beta2.HorizontalPodAutoscaler, events *corev1.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name) @@ -3013,7 +3078,7 @@ func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, e w.Write(LEVEL_0, "Metrics:\t( current / target )\n") for i, metric := range hpa.Spec.Metrics { switch metric.Type { - case autoscaling.ExternalMetricSourceType: + case autoscalingv2beta2.ExternalMetricSourceType: if metric.External.Target.AverageValue != nil { current := "" if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil && @@ -3029,19 +3094,19 @@ func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, e w.Write(LEVEL_1, "%q (target value):\t%s / %s\n", metric.External.Metric.Name, current, metric.External.Target.Value.String()) } - case autoscaling.PodsMetricSourceType: + case autoscalingv2beta2.PodsMetricSourceType: current := "" if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Pods != nil { current = hpa.Status.CurrentMetrics[i].Pods.Current.AverageValue.String() } w.Write(LEVEL_1, "%q on pods:\t%s / %s\n", metric.Pods.Metric.Name, current, metric.Pods.Target.AverageValue.String()) - case autoscaling.ObjectMetricSourceType: + case autoscalingv2beta2.ObjectMetricSourceType: current := "" if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil { current = 
hpa.Status.CurrentMetrics[i].Object.Current.Value.String() } w.Write(LEVEL_1, "%q on %s/%s:\t%s / %s\n", metric.Object.Metric.Name, metric.Object.DescribedObject.Kind, metric.Object.DescribedObject.Name, current, metric.Object.Target.Value.String()) - case autoscaling.ResourceMetricSourceType: + case autoscalingv2beta2.ResourceMetricSourceType: w.Write(LEVEL_1, "resource %s on pods", string(metric.Resource.Name)) if metric.Resource.Target.AverageValue != nil { current := "" @@ -3091,10 +3156,10 @@ func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, e }) } -func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node, w PrefixWriter) { +func describeNodeResource(nodeNonTerminatedPodsList *corev1.PodList, node *corev1.Node, w PrefixWriter) { w.Write(LEVEL_0, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items)) - w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") - w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\n") + w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\tAGE\n") + w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\t---\n") allocatable := node.Status.Capacity if len(node.Status.Allocatable) > 0 { allocatable = node.Status.Allocatable @@ -3102,21 +3167,22 @@ func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node for _, pod := range nodeNonTerminatedPodsList.Items { req, limit := resourcehelper.PodRequestsAndLimits(&pod) - cpuReq, cpuLimit, memoryReq, memoryLimit := req[api.ResourceCPU], limit[api.ResourceCPU], req[api.ResourceMemory], limit[api.ResourceMemory] + cpuReq, cpuLimit, memoryReq, memoryLimit := req[corev1.ResourceCPU], limit[corev1.ResourceCPU], req[corev1.ResourceMemory], limit[corev1.ResourceMemory] fractionCpuReq := float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 fractionMemoryReq := float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100 fractionMemoryLimit := float64(memoryLimit.Value()) / float64(allocatable.Memory().Value()) * 100 - w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", pod.Namespace, pod.Name, + w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s\n", pod.Namespace, pod.Name, cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit), - memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit)) + memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit), translateTimestampSince(pod.CreationTimestamp)) } w.Write(LEVEL_0, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n") w.Write(LEVEL_1, "Resource\tRequests\tLimits\n") w.Write(LEVEL_1, "--------\t--------\t------\n") reqs, limits := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList) - cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory] + cpuReqs, cpuLimits, memoryReqs, memoryLimits, ephemeralstorageReqs, ephemeralstorageLimits := + reqs[corev1.ResourceCPU], limits[corev1.ResourceCPU], reqs[corev1.ResourceMemory], limits[corev1.ResourceMemory], reqs[corev1.ResourceEphemeralStorage], 
limits[corev1.ResourceEphemeralStorage] fractionCpuReqs := float64(0) fractionCpuLimits := float64(0) if allocatable.Cpu().MilliValue() != 0 { @@ -3129,25 +3195,33 @@ func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node fractionMemoryReqs = float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100 fractionMemoryLimits = float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100 } + fractionEphemeralStorageReqs := float64(0) + fractionEphemeralStorageLimits := float64(0) + if allocatable.StorageEphemeral().Value() != 0 { + fractionEphemeralStorageReqs = float64(ephemeralstorageReqs.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100 + fractionEphemeralStorageLimits = float64(ephemeralstorageLimits.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100 + } w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", - api.ResourceCPU, cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits)) + corev1.ResourceCPU, cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits)) w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", - api.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) + corev1.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) + w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", + corev1.ResourceEphemeralStorage, ephemeralstorageReqs.String(), int64(fractionEphemeralStorageReqs), ephemeralstorageLimits.String(), int64(fractionEphemeralStorageLimits)) extResources := make([]string, 0, len(allocatable)) for resource := range allocatable { - if !helper.IsStandardContainerResourceName(string(resource)) && resource != api.ResourcePods { + if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods { extResources = append(extResources, string(resource)) } } sort.Strings(extResources) for _, ext := range extResources { - extRequests, extLimits := reqs[api.ResourceName(ext)], limits[api.ResourceName(ext)] + extRequests, extLimits := reqs[corev1.ResourceName(ext)], limits[corev1.ResourceName(ext)] w.Write(LEVEL_1, "%s\t%s\t%s\n", ext, extRequests.String(), extLimits.String()) } } -func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity) { - reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{} +func getPodsTotalRequestsAndLimits(podList *corev1.PodList) (reqs map[corev1.ResourceName]resource.Quantity, limits map[corev1.ResourceName]resource.Quantity) { + reqs, limits = map[corev1.ResourceName]resource.Quantity{}, map[corev1.ResourceName]resource.Quantity{} for _, pod := range podList.Items { podReqs, podLimits := resourcehelper.PodRequestsAndLimits(&pod) for podReqName, podReqValue := range podReqs { @@ -3170,13 +3244,13 @@ func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceN return } -func DescribeEvents(el *api.EventList, w PrefixWriter) { +func DescribeEvents(el *corev1.EventList, w PrefixWriter) { if len(el.Items) == 0 { w.Write(LEVEL_0, "Events:\t\n") return } w.Flush() - sort.Sort(events.SortableEvents(el.Items)) + sort.Sort(event.SortableEvents(el.Items)) w.Write(LEVEL_0, "Events:\n Type\tReason\tAge\tFrom\tMessage\n") w.Write(LEVEL_1, "----\t------\t----\t----\t-------\n") for _, e := range el.Items { @@ -3198,12 +3272,11 @@ func 
DescribeEvents(el *api.EventList, w PrefixWriter) { // DeploymentDescriber generates information about a deployment. type DeploymentDescriber struct { - clientset.Interface - external externalclient.Interface + client clientset.Interface } -func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - d, err := dd.external.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) +func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { + d, err := dd.client.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3211,20 +3284,15 @@ func (dd *DeploymentDescriber) Describe(namespace, name string, describerSetting if err != nil { return "", err } - internalDeployment := &extensions.Deployment{} - if err := legacyscheme.Scheme.Convert(d, internalDeployment, extensions.SchemeGroupVersion); err != nil { - return "", err - } - - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = dd.Core().Events(namespace).Search(legacyscheme.Scheme, d) + events, _ = dd.client.CoreV1().Events(namespace).Search(scheme.Scheme, d) } - return describeDeployment(d, selector, internalDeployment, events, dd) + return describeDeployment(d, selector, d, events, dd) } -func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internalDeployment *extensions.Deployment, events *api.EventList, dd *DeploymentDescriber) (string, error) { +func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internalDeployment *appsv1.Deployment, events *corev1.EventList, dd *DeploymentDescriber) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", d.ObjectMeta.Name) @@ -3248,7 +3316,7 @@ func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internal w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason) } } - oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.external.AppsV1()) + oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.client.AppsV1()) if err == nil { w.Write(LEVEL_0, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs)) var newRSs []*appsv1.ReplicaSet @@ -3279,7 +3347,7 @@ func printReplicaSetsByLabels(matchingRSs []*appsv1.ReplicaSet) string { return list } -func getPodStatusForController(c coreclient.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) { +func getPodStatusForController(c corev1client.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) { options := metav1.ListOptions{LabelSelector: selector.String()} rcPods, err := c.List(options) if err != nil { @@ -3292,13 +3360,13 @@ func getPodStatusForController(c coreclient.PodInterface, selector labels.Select continue } switch pod.Status.Phase { - case api.PodRunning: + case corev1.PodRunning: running++ - case api.PodPending: + case corev1.PodPending: waiting++ - case api.PodSucceeded: + case corev1.PodSucceeded: succeeded++ - case api.PodFailed: + case corev1.PodFailed: failed++ } } @@ -3310,7 +3378,7 @@ type ConfigMapDescriber struct { clientset.Interface } -func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings 
describe.DescriberSettings) (string, error) { c := d.Core().ConfigMaps(namespace) configMap, err := c.Get(name, metav1.GetOptions{}) @@ -3331,7 +3399,7 @@ func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings w.Write(LEVEL_0, "%s\n", string(v)) } if describerSettings.ShowEvents { - events, err := d.Core().Events(namespace).Search(legacyscheme.Scheme, configMap) + events, err := d.Core().Events(namespace).Search(scheme.Scheme, configMap) if err != nil { return err } @@ -3343,12 +3411,12 @@ func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings }) } -// NetworkPolicyDescriber generates information about a networking.NetworkPolicy +// NetworkPolicyDescriber generates information about a networkingv1.NetworkPolicy type NetworkPolicyDescriber struct { clientset.Interface } -func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Networking().NetworkPolicies(namespace) networkPolicy, err := c.Get(name, metav1.GetOptions{}) @@ -3359,7 +3427,7 @@ func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSetti return describeNetworkPolicy(networkPolicy) } -func describeNetworkPolicy(networkPolicy *networking.NetworkPolicy) (string, error) { +func describeNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", networkPolicy.Name) @@ -3372,7 +3440,7 @@ func describeNetworkPolicy(networkPolicy *networking.NetworkPolicy) (string, err }) } -func describeNetworkPolicySpec(nps networking.NetworkPolicySpec, w PrefixWriter) { +func describeNetworkPolicySpec(nps networkingv1.NetworkPolicySpec, w PrefixWriter) { w.Write(LEVEL_0, "Spec:\n") w.Write(LEVEL_1, "PodSelector: ") if len(nps.PodSelector.MatchLabels) == 0 && len(nps.PodSelector.MatchExpressions) == 0 { @@ -3387,7 +3455,7 @@ func describeNetworkPolicySpec(nps networking.NetworkPolicySpec, w PrefixWriter) w.Write(LEVEL_1, "Policy Types: %v\n", policyTypesToString(nps.PolicyTypes)) } -func printNetworkPolicySpecIngressFrom(npirs []networking.NetworkPolicyIngressRule, initialIndent string, w PrefixWriter) { +func printNetworkPolicySpecIngressFrom(npirs []networkingv1.NetworkPolicyIngressRule, initialIndent string, w PrefixWriter) { if len(npirs) == 0 { w.Write(LEVEL_0, "%s%s\n", initialIndent, " (Selected pods are isolated for ingress connectivity)") return @@ -3397,11 +3465,11 @@ func printNetworkPolicySpecIngressFrom(npirs []networking.NetworkPolicyIngressRu w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: (traffic allowed to all ports)") } else { for _, port := range npir.Ports { - var proto api.Protocol + var proto corev1.Protocol if port.Protocol != nil { proto = *port.Protocol } else { - proto = api.ProtocolTCP + proto = corev1.ProtocolTCP } w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) } @@ -3431,7 +3499,7 @@ func printNetworkPolicySpecIngressFrom(npirs []networking.NetworkPolicyIngressRu } } -func printNetworkPolicySpecEgressTo(npers []networking.NetworkPolicyEgressRule, initialIndent string, w PrefixWriter) { +func printNetworkPolicySpecEgressTo(npers []networkingv1.NetworkPolicyEgressRule, initialIndent string, w PrefixWriter) { if len(npers) == 0 { w.Write(LEVEL_0, "%s%s\n", initialIndent, " (Selected pods are 
isolated for egress connectivity)") return @@ -3441,11 +3509,11 @@ func printNetworkPolicySpecEgressTo(npers []networking.NetworkPolicyEgressRule, w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: (traffic allowed to all ports)") } else { for _, port := range nper.Ports { - var proto api.Protocol + var proto corev1.Protocol if port.Protocol != nil { proto = *port.Protocol } else { - proto = api.ProtocolTCP + proto = corev1.ProtocolTCP } w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) } @@ -3479,21 +3547,21 @@ type StorageClassDescriber struct { clientset.Interface } -func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { sc, err := s.Storage().StorageClasses().Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = s.Core().Events(namespace).Search(legacyscheme.Scheme, sc) + events, _ = s.Core().Events(namespace).Search(scheme.Scheme, sc) } return describeStorageClass(sc, events) } -func describeStorageClass(sc *storage.StorageClass, events *api.EventList) (string, error) { +func describeStorageClass(sc *storagev1.StorageClass, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", sc.Name) @@ -3527,7 +3595,7 @@ func describeStorageClass(sc *storage.StorageClass, events *api.EventList) (stri }) } -func printAllowedTopologies(w PrefixWriter, topologies []api.TopologySelectorTerm) { +func printAllowedTopologies(w PrefixWriter, topologies []corev1.TopologySelectorTerm) { w.Write(LEVEL_0, "AllowedTopologies:\t") if len(topologies) == 0 { w.WriteLine("") @@ -3539,7 +3607,7 @@ func printAllowedTopologies(w PrefixWriter, topologies []api.TopologySelectorTer } } -func printTopologySelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []api.TopologySelectorLabelRequirement) { +func printTopologySelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.TopologySelectorLabelRequirement) { w.Write(indentLevel, "%s:%s", title, innerIndent) if len(reqs) == 0 { @@ -3563,21 +3631,21 @@ type PodDisruptionBudgetDescriber struct { clientset.Interface } -func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { pdb, err := p.Policy().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = p.Core().Events(namespace).Search(legacyscheme.Scheme, pdb) + events, _ = p.Core().Events(namespace).Search(scheme.Scheme, pdb) } return describePodDisruptionBudget(pdb, events) } -func describePodDisruptionBudget(pdb *policy.PodDisruptionBudget, events *api.EventList) (string, error) { +func describePodDisruptionBudget(pdb *policyv1beta1.PodDisruptionBudget, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", pdb.Name) @@ -3612,21 +3680,21 @@ 
type PriorityClassDescriber struct { clientset.Interface } -func (s *PriorityClassDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (s *PriorityClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { pc, err := s.Scheduling().PriorityClasses().Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = s.Core().Events(namespace).Search(legacyscheme.Scheme, pc) + events, _ = s.Core().Events(namespace).Search(scheme.Scheme, pc) } return describePriorityClass(pc, events) } -func describePriorityClass(pc *scheduling.PriorityClass, events *api.EventList) (string, error) { +func describePriorityClass(pc *schedulingv1beta1.PriorityClass, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", pc.Name) @@ -3643,12 +3711,12 @@ func describePriorityClass(pc *scheduling.PriorityClass, events *api.EventList) }) } -// PodSecurityPolicyDescriber generates information about a PodSecurityPolicy. +// PodSecurityPolicyDescriber generates information about a PodSecuritypolicyv1beta1. type PodSecurityPolicyDescriber struct { clientset.Interface } -func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { psp, err := d.Policy().PodSecurityPolicies().Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -3657,7 +3725,7 @@ func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerS return describePodSecurityPolicy(psp) } -func describePodSecurityPolicy(psp *policy.PodSecurityPolicy) (string, error) { +func describePodSecurityPolicy(psp *policyv1beta1.PodSecurityPolicy) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", psp.Name) @@ -3723,7 +3791,7 @@ func stringOrDefaultValue(s, defaultValue string) string { return defaultValue } -func fsTypeToString(volumes []policy.FSType) string { +func fsTypeToString(volumes []policyv1beta1.FSType) string { strVolumes := []string{} for _, v := range volumes { strVolumes = append(strVolumes, string(v)) @@ -3731,7 +3799,7 @@ func fsTypeToString(volumes []policy.FSType) string { return stringOrNone(strings.Join(strVolumes, ",")) } -func flexVolumesToString(flexVolumes []policy.AllowedFlexVolume) string { +func flexVolumesToString(flexVolumes []policyv1beta1.AllowedFlexVolume) string { volumes := []string{} for _, flexVolume := range flexVolumes { volumes = append(volumes, "driver="+flexVolume.Driver) @@ -3743,7 +3811,7 @@ func sysctlsToString(sysctls []string) string { return stringOrNone(strings.Join(sysctls, ",")) } -func hostPortRangeToString(ranges []policy.HostPortRange) string { +func hostPortRangeToString(ranges []policyv1beta1.HostPortRange) string { formattedString := "" if ranges != nil { strRanges := []string{} @@ -3755,7 +3823,7 @@ func hostPortRangeToString(ranges []policy.HostPortRange) string { return stringOrNone(formattedString) } -func idRangeToString(ranges []policy.IDRange) string { +func idRangeToString(ranges []policyv1beta1.IDRange) string { formattedString := "" if ranges != nil { strRanges := []string{} @@ -3767,7 +3835,7 
@@ func idRangeToString(ranges []policy.IDRange) string { return stringOrNone(formattedString) } -func capsToString(caps []api.Capability) string { +func capsToString(caps []corev1.Capability) string { formattedString := "" if caps != nil { strCaps := []string{} @@ -3779,7 +3847,7 @@ func capsToString(caps []api.Capability) string { return stringOrNone(formattedString) } -func policyTypesToString(pts []networking.PolicyType) string { +func policyTypesToString(pts []networkingv1.PolicyType) string { formattedString := "" if pts != nil { strPts := []string{} @@ -3797,7 +3865,7 @@ func newErrNoDescriber(types ...reflect.Type) error { for _, t := range types { names = append(names, t.String()) } - return printers.ErrNoDescriber{Types: names} + return describe.ErrNoDescriber{Types: names} } // Describers implements ObjectDescriber against functions registered via Add. Those functions can @@ -3844,7 +3912,7 @@ func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (st return "", newErrNoDescriber(append([]reflect.Type{exactType}, types...)...) } -// Add adds one or more describer functions to the printers.Describer. The passed function must +// Add adds one or more describer functions to the describe.Describer. The passed function must // match the signature: // // func(...) (string, error) @@ -3971,12 +4039,12 @@ func printLabelsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerI } // printTaintsMultiline prints multiple taints with a proper alignment. -func printNodeTaintsMultiline(w PrefixWriter, title string, taints []api.Taint) { +func printNodeTaintsMultiline(w PrefixWriter, title string, taints []corev1.Taint) { printTaintsMultilineWithIndent(w, "", title, "\t", taints) } // printTaintsMultilineWithIndent prints multiple taints with a user-defined alignment. -func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, taints []api.Taint) { +func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, taints []corev1.Taint) { w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) if taints == nil || len(taints) == 0 { @@ -3986,7 +4054,7 @@ func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerI // to print taints in the sorted order sort.Slice(taints, func(i, j int) bool { - cmpKey := func(taint api.Taint) string { + cmpKey := func(taint corev1.Taint) string { return string(taint.Effect) + "," + taint.Key } return cmpKey(taints[i]) < cmpKey(taints[j]) @@ -4002,12 +4070,12 @@ func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerI } // printPodsMultiline prints multiple pods with a proper alignment. -func printPodsMultiline(w PrefixWriter, title string, pods []api.Pod) { +func printPodsMultiline(w PrefixWriter, title string, pods []corev1.Pod) { printPodsMultilineWithIndent(w, "", title, "\t", pods) } // printPodsMultilineWithIndent prints multiple pods with a user-defined alignment. 
-func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, pods []api.Pod) { +func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, pods []corev1.Pod) { w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) if pods == nil || len(pods) == 0 { @@ -4017,7 +4085,7 @@ func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerInd // to print pods in the sorted order sort.Slice(pods, func(i, j int) bool { - cmpKey := func(pod api.Pod) string { + cmpKey := func(pod corev1.Pod) string { return pod.Name } return cmpKey(pods[i]) < cmpKey(pods[j]) @@ -4033,12 +4101,12 @@ func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerInd } // printPodTolerationsMultiline prints multiple tolerations with a proper alignment. -func printPodTolerationsMultiline(w PrefixWriter, title string, tolerations []api.Toleration) { +func printPodTolerationsMultiline(w PrefixWriter, title string, tolerations []corev1.Toleration) { printTolerationsMultilineWithIndent(w, "", title, "\t", tolerations) } // printTolerationsMultilineWithIndent prints multiple tolerations with a user-defined alignment. -func printTolerationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, tolerations []api.Toleration) { +func printTolerationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, tolerations []corev1.Toleration) { w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) if tolerations == nil || len(tolerations) == 0 { @@ -4089,7 +4157,7 @@ func tabbedString(f func(io.Writer) error) (string, error) { return str, nil } -type SortableResourceNames []api.ResourceName +type SortableResourceNames []corev1.ResourceName func (list SortableResourceNames) Len() int { return len(list) @@ -4104,8 +4172,8 @@ func (list SortableResourceNames) Less(i, j int) bool { } // SortedResourceNames returns the sorted resource names of a resource list. -func SortedResourceNames(list api.ResourceList) []api.ResourceName { - resources := make([]api.ResourceName, 0, len(list)) +func SortedResourceNames(list corev1.ResourceList) []corev1.ResourceName { + resources := make([]corev1.ResourceName, 0, len(list)) for res := range list { resources = append(resources, res) } @@ -4113,7 +4181,7 @@ func SortedResourceNames(list api.ResourceList) []api.ResourceName { return resources } -type SortableResourceQuotas []api.ResourceQuota +type SortableResourceQuotas []corev1.ResourceQuota func (list SortableResourceQuotas) Len() int { return len(list) @@ -4127,7 +4195,7 @@ func (list SortableResourceQuotas) Less(i, j int) bool { return list[i].Name < list[j].Name } -type SortableVolumeMounts []api.VolumeMount +type SortableVolumeMounts []corev1.VolumeMount func (list SortableVolumeMounts) Len() int { return len(list) @@ -4141,7 +4209,7 @@ func (list SortableVolumeMounts) Less(i, j int) bool { return list[i].MountPath < list[j].MountPath } -type SortableVolumeDevices []api.VolumeDevice +type SortableVolumeDevices []corev1.VolumeDevice func (list SortableVolumeDevices) Len() int { return len(list) @@ -4215,3 +4283,156 @@ func shorten(s string, maxLength int) string { } return s } + +// translateTimestampUntil returns the elapsed time until timestamp in +// human-readable approximation. 
+func translateTimestampUntil(timestamp metav1.Time) string { + if timestamp.IsZero() { + return "" + } + + return duration.HumanDuration(time.Until(timestamp.Time)) +} + +// translateTimestampSince returns the elapsed time since timestamp in +// human-readable approximation. +func translateTimestampSince(timestamp metav1.Time) string { + if timestamp.IsZero() { + return "" + } + + return duration.HumanDuration(time.Since(timestamp.Time)) +} + +// formatEventSource formats EventSource as a comma separated string excluding Host when empty +func formatEventSource(es corev1.EventSource) string { + EventSourceString := []string{es.Component} + if len(es.Host) > 0 { + EventSourceString = append(EventSourceString, es.Host) + } + return strings.Join(EventSourceString, ", ") +} + +// Pass ports=nil for all ports. +func formatEndpoints(endpoints *corev1.Endpoints, ports sets.String) string { + if len(endpoints.Subsets) == 0 { + return "" + } + list := []string{} + max := 3 + more := false + count := 0 + for i := range endpoints.Subsets { + ss := &endpoints.Subsets[i] + if len(ss.Ports) == 0 { + // It's possible to have headless services with no ports. + for i := range ss.Addresses { + if len(list) == max { + more = true + } + if !more { + list = append(list, ss.Addresses[i].IP) + } + count++ + } + } else { + // "Normal" services with ports defined. + for i := range ss.Ports { + port := &ss.Ports[i] + if ports == nil || ports.Has(port.Name) { + for i := range ss.Addresses { + if len(list) == max { + more = true + } + addr := &ss.Addresses[i] + if !more { + hostPort := net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port))) + list = append(list, hostPort) + } + count++ + } + } + } + } + } + ret := strings.Join(list, ",") + if more { + return fmt.Sprintf("%s + %d more...", ret, count-max) + } + return ret +} + +func extractCSRStatus(csr *certificatesv1beta1.CertificateSigningRequest) (string, error) { + var approved, denied bool + for _, c := range csr.Status.Conditions { + switch c.Type { + case certificatesv1beta1.CertificateApproved: + approved = true + case certificatesv1beta1.CertificateDenied: + denied = true + default: + return "", fmt.Errorf("unknown csr condition %q", c) + } + } + var status string + // must be in order of presidence + if denied { + status += "Denied" + } else if approved { + status += "Approved" + } else { + status += "Pending" + } + if len(csr.Status.Certificate) > 0 { + status += ",Issued" + } + return status, nil +} + +// backendStringer behaves just like a string interface and converts the given backend to a string. +func backendStringer(backend *extensionsv1beta1.IngressBackend) string { + if backend == nil { + return "" + } + return fmt.Sprintf("%v:%v", backend.ServiceName, backend.ServicePort.String()) +} + +// findNodeRoles returns the roles of a given node. +// The roles are determined by looking for: +// * a node-role.kubernetes.io/="" label +// * a kubernetes.io/role="" label +func findNodeRoles(node *corev1.Node) []string { + roles := sets.NewString() + for k, v := range node.Labels { + switch { + case strings.HasPrefix(k, describe.LabelNodeRolePrefix): + if role := strings.TrimPrefix(k, describe.LabelNodeRolePrefix); len(role) > 0 { + roles.Insert(role) + } + + case k == describe.NodeLabelRole && v != "": + roles.Insert(v) + } + } + return roles.List() +} + +// loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string. +// `wide` indicates whether the returned value is meant for --o=wide output. 
If not, it's clipped to 16 bytes. +func loadBalancerStatusStringer(s corev1.LoadBalancerStatus, wide bool) string { + ingress := s.Ingress + result := sets.NewString() + for i := range ingress { + if ingress[i].IP != "" { + result.Insert(ingress[i].IP) + } else if ingress[i].Hostname != "" { + result.Insert(ingress[i].Hostname) + } + } + + r := strings.Join(result.List(), ",") + if !wide && len(r) > describe.LoadBalancerWidth { + r = r[0:(describe.LoadBalancerWidth-3)] + "..." + } + return r +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/explain/recursive_fields_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/explain/recursive_fields_printer.go index 95638c85adfe5..6429a73264eb6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/explain/recursive_fields_printer.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/explain/recursive_fields_printer.go @@ -30,6 +30,7 @@ type recursiveFieldsPrinter struct { var _ proto.SchemaVisitor = &recursiveFieldsPrinter{} var _ fieldsPrinter = &recursiveFieldsPrinter{} +var visitedReferences = map[string]struct{}{} // VisitArray is just a passthrough. func (f *recursiveFieldsPrinter) VisitArray(a *proto.Array) { @@ -64,7 +65,12 @@ func (f *recursiveFieldsPrinter) VisitPrimitive(p *proto.Primitive) { // VisitReference is just a passthrough. func (f *recursiveFieldsPrinter) VisitReference(r proto.Reference) { + if _, ok := visitedReferences[r.Reference()]; ok { + return + } + visitedReferences[r.Reference()] = struct{}{} r.SubSchema().Accept(f) + delete(visitedReferences, r.Reference()) } // PrintFields will recursively print all the fields for the given diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/explain/test-recursive-swagger.json b/vendor/k8s.io/kubernetes/pkg/kubectl/explain/test-recursive-swagger.json new file mode 100644 index 0000000000000..1ae79855d3936 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/explain/test-recursive-swagger.json @@ -0,0 +1,63 @@ +{ + "swagger": "2.0", + "info": { + "title": "Kubernetes", + "version": "v1.9.0" + }, + "paths": {}, + "definitions": { + "OneKind": { + "description": "OneKind has a short description", + "required": [ + "field1" + ], + "properties": { + "field1": { + "description": "This is first reference field", + "$ref": "#/definitions/ReferenceKind" + }, + "field2": { + "description": "This is other kind field with string and reference", + "$ref": "#/definitions/OtherKind" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "OneKind", + "version": "v2" + } + ] + }, + "ReferenceKind": { + "description": "This is reference Kind", + "properties": { + "referencefield": { + "description": "This is reference to itself.", + "$ref": "#/definitions/ReferenceKind" + }, + "referencesarray": { + "description": "This is an array of references", + "type": "array", + "items": { + "description": "This is reference object", + "$ref": "#/definitions/ReferenceKind" + } + } + } + }, + "OtherKind": { + "description": "This is other kind with string and reference fields", + "properties": { + "string": { + "description": "This string must be a string", + "type": "string" + }, + "reference": { + "description": "This is reference field.", + "$ref": "#/definitions/ReferenceKind" + } + } + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/generate/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/BUILD.bazel new file mode 100644 index 0000000000000..a47358f899cef --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/BUILD.bazel @@ -0,0 +1,15 @@ 
+load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["generate.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/generate", + importpath = "k8s.io/kubernetes/pkg/kubectl/generate", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/generate.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/kubectl/generate.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/generate.go index 07d8bb893207b..1915be8cb9617 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/generate.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package generate import ( "fmt" @@ -24,10 +24,14 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/runtime" utilerrors "k8s.io/apimachinery/pkg/util/errors" ) +// GeneratorFunc returns the generators for the provided command +type GeneratorFunc func(cmdName string) map[string]Generator + // GeneratorParam is a parameter for a generator // TODO: facilitate structured json generator input schemes type GeneratorParam struct { @@ -98,7 +102,7 @@ func AnnotateFlags(cmd *cobra.Command, generators map[string]Generator) { } } -// EnsureFlagsValid ensures that no invalid flags are being used against a generator. 
+// EnsureFlagsValid ensures that no invalid flags are being used against a func EnsureFlagsValid(cmd *cobra.Command, generators map[string]Generator, generatorInUse string) error { AnnotateFlags(cmd, generators) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/BUILD.bazel new file mode 100644 index 0000000000000..c5e77b38f340b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/BUILD.bazel @@ -0,0 +1,54 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "autoscale.go", + "clusterrolebinding.go", + "configmap.go", + "deployment.go", + "env_file.go", + "generator.go", + "namespace.go", + "pdb.go", + "priorityclass.go", + "quota.go", + "rolebinding.go", + "run.go", + "secret.go", + "secret_for_docker_registry.go", + "secret_for_tls.go", + "service.go", + "service_basic.go", + "serviceaccount.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned", + importpath = "k8s.io/kubernetes/pkg/kubectl/generate/versioned", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", + "//vendor/k8s.io/api/batch/v1:go_default_library", + "//vendor/k8s.io/api/batch/v1beta1:go_default_library", + "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", + "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", + "//vendor/k8s.io/api/scheduling/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/hash:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/autoscale.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/autoscale.go index 39c78ca31df29..7cb8dbbc50d62 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/autoscale.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kubectl +package versioned import ( "fmt" @@ -22,21 +22,22 @@ import ( autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubectl/generate" ) -// HorizontalPodAutoscalerV1Generator supports stable generation of a horizontal pod autoscaler. +// HorizontalPodAutoscalerGeneratorV1 supports stable generation of a horizontal pod autoscaler. type HorizontalPodAutoscalerGeneratorV1 struct { Name string ScaleRefKind string ScaleRefName string - ScaleRefApiVersion string + ScaleRefAPIVersion string MinReplicas int32 MaxReplicas int32 CPUPercent int32 } // Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &HorizontalPodAutoscalerGeneratorV1{} +var _ generate.StructuredGenerator = &HorizontalPodAutoscalerGeneratorV1{} // StructuredGenerate outputs a horizontal pod autoscaler object using the configured fields. func (s *HorizontalPodAutoscalerGeneratorV1) StructuredGenerate() (runtime.Object, error) { @@ -52,7 +53,7 @@ func (s *HorizontalPodAutoscalerGeneratorV1) StructuredGenerate() (runtime.Objec ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{ Kind: s.ScaleRefKind, Name: s.ScaleRefName, - APIVersion: s.ScaleRefApiVersion, + APIVersion: s.ScaleRefAPIVersion, }, MaxReplicas: s.MaxReplicas, }, diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/clusterrolebinding.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/kubectl/clusterrolebinding.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/clusterrolebinding.go index d946560691175..b874c63794b18 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/clusterrolebinding.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/clusterrolebinding.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "fmt" @@ -24,6 +24,7 @@ import ( rbacv1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/kubectl/generate" ) // ClusterRoleBindingGeneratorV1 supports stable generation of a clusterRoleBinding. @@ -41,14 +42,14 @@ type ClusterRoleBindingGeneratorV1 struct { } // Ensure it supports the generator pattern that uses parameter injection. -var _ Generator = &ClusterRoleBindingGeneratorV1{} +var _ generate.Generator = &ClusterRoleBindingGeneratorV1{} // Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &ClusterRoleBindingGeneratorV1{} +var _ generate.StructuredGenerator = &ClusterRoleBindingGeneratorV1{} // Generate returns a clusterRoleBinding using the specified parameters. func (s ClusterRoleBindingGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) + err := generate.ValidateParams(s.ParamNames(), genericParams) if err != nil { return nil, err } @@ -94,13 +95,13 @@ func (s ClusterRoleBindingGeneratorV1) Generate(genericParams map[string]interfa } // ParamNames returns the set of supported input parameters when using the parameter injection generator pattern. 
-func (s ClusterRoleBindingGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"clusterrole", false}, - {"user", false}, - {"group", false}, - {"serviceaccount", false}, +func (s ClusterRoleBindingGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "clusterrole", Required: false}, + {Name: "user", Required: false}, + {Name: "group", Required: false}, + {Name: "serviceaccount", Required: false}, } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/configmap.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/configmap.go index ba4ac1ee2026d..67058422ca426 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/configmap.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "fmt" @@ -27,6 +27,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/kubernetes/pkg/kubectl/generate" "k8s.io/kubernetes/pkg/kubectl/util" "k8s.io/kubernetes/pkg/kubectl/util/hash" ) @@ -48,14 +49,14 @@ type ConfigMapGeneratorV1 struct { } // Ensure it supports the generator pattern that uses parameter injection. -var _ Generator = &ConfigMapGeneratorV1{} +var _ generate.Generator = &ConfigMapGeneratorV1{} // Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &ConfigMapGeneratorV1{} +var _ generate.StructuredGenerator = &ConfigMapGeneratorV1{} // Generate returns a configMap using the specified parameters. func (s ConfigMapGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) + err := generate.ValidateParams(s.ParamNames(), genericParams) if err != nil { return nil, err } @@ -111,15 +112,15 @@ func (s ConfigMapGeneratorV1) Generate(genericParams map[string]interface{}) (ru } // ParamNames returns the set of supported input parameters when using the parameter injection generator pattern. 
-func (s ConfigMapGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"type", false}, - {"from-file", false}, - {"from-literal", false}, - {"from-env-file", false}, - {"force", false}, - {"hash", false}, +func (s ConfigMapGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "type", Required: false}, + {Name: "from-file", Required: false}, + {Name: "from-literal", Required: false}, + {Name: "from-env-file", Required: false}, + {Name: "force", Required: false}, + {Name: "hash", Required: false}, } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/deployment.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/deployment.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/kubectl/deployment.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/deployment.go index 9686d200d54e9..4b2b24d575dcc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/deployment.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/deployment.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "fmt" @@ -26,9 +26,10 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubectl/generate" ) -// BaseDeploymentGenerator: implement the common functionality of +// BaseDeploymentGenerator implements the common functionality of // DeploymentBasicGeneratorV1, DeploymentBasicAppsGeneratorV1Beta1 and DeploymentBasicAppsGeneratorV1. To reduce // confusion, it's best to keep this struct in the same file as those // generators. 
@@ -94,7 +95,7 @@ type DeploymentBasicGeneratorV1 struct { } // Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &DeploymentBasicGeneratorV1{} +var _ generate.StructuredGenerator = &DeploymentBasicGeneratorV1{} // StructuredGenerate outputs a deployment object using the configured fields func (s *DeploymentBasicGeneratorV1) StructuredGenerate() (runtime.Object, error) { @@ -124,7 +125,7 @@ type DeploymentBasicAppsGeneratorV1Beta1 struct { } // Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &DeploymentBasicAppsGeneratorV1Beta1{} +var _ generate.StructuredGenerator = &DeploymentBasicAppsGeneratorV1Beta1{} // StructuredGenerate outputs a deployment object using the configured fields func (s *DeploymentBasicAppsGeneratorV1Beta1) StructuredGenerate() (runtime.Object, error) { @@ -154,7 +155,7 @@ type DeploymentBasicAppsGeneratorV1 struct { } // Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &DeploymentBasicAppsGeneratorV1{} +var _ generate.StructuredGenerator = &DeploymentBasicAppsGeneratorV1{} // StructuredGenerate outputs a deployment object using the configured fields func (s *DeploymentBasicAppsGeneratorV1) StructuredGenerate() (runtime.Object, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/env_file.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/env_file.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/kubectl/env_file.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/env_file.go index 8221d5936cd18..1277257f24b41 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/env_file.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/env_file.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "bufio" diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/generator.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/generator.go similarity index 72% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/generator.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/generator.go index 7d02669bee859..01d0e21d5a7b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/generator.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/generator.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package versioned import ( "fmt" @@ -29,9 +29,12 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" - "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/generate" ) +// GeneratorFn gives a way to easily override the function for unit testing if needed +var GeneratorFn generate.GeneratorFunc = DefaultGenerators + const ( // TODO(sig-cli): Enforce consistent naming for generators here. 
// See discussion in https://github.com/kubernetes/kubernetes/issues/46237 @@ -48,6 +51,7 @@ const ( HorizontalPodAutoscalerV1GeneratorName = "horizontalpodautoscaler/v1" DeploymentV1Beta1GeneratorName = "deployment/v1beta1" DeploymentAppsV1Beta1GeneratorName = "deployment/apps.v1beta1" + DeploymentAppsV1GeneratorName = "deployment/apps.v1" DeploymentBasicV1Beta1GeneratorName = "deployment-basic/v1beta1" DeploymentBasicAppsV1Beta1GeneratorName = "deployment-basic/apps.v1beta1" DeploymentBasicAppsV1GeneratorName = "deployment-basic/apps.v1" @@ -67,70 +71,65 @@ const ( PriorityClassV1Alpha1GeneratorName = "priorityclass/v1alpha1" ) -// GeneratorFunc returns the generators for the provided command -type GeneratorFunc func(cmdName string) map[string]kubectl.Generator - -// GeneratorFn gives a way to easily override the function for unit testing if needed -var GeneratorFn GeneratorFunc = defaultGenerators - -// defaultGenerators returns the set of default generators for use in Factory instances -func defaultGenerators(cmdName string) map[string]kubectl.Generator { - var generator map[string]kubectl.Generator +// DefaultGenerators returns the set of default generators for use in Factory instances +func DefaultGenerators(cmdName string) map[string]generate.Generator { + var generator map[string]generate.Generator switch cmdName { case "expose": - generator = map[string]kubectl.Generator{ - ServiceV1GeneratorName: kubectl.ServiceGeneratorV1{}, - ServiceV2GeneratorName: kubectl.ServiceGeneratorV2{}, + generator = map[string]generate.Generator{ + ServiceV1GeneratorName: ServiceGeneratorV1{}, + ServiceV2GeneratorName: ServiceGeneratorV2{}, } case "service-clusterip": - generator = map[string]kubectl.Generator{ - ServiceClusterIPGeneratorV1Name: kubectl.ServiceClusterIPGeneratorV1{}, + generator = map[string]generate.Generator{ + ServiceClusterIPGeneratorV1Name: ServiceClusterIPGeneratorV1{}, } case "service-nodeport": - generator = map[string]kubectl.Generator{ - ServiceNodePortGeneratorV1Name: kubectl.ServiceNodePortGeneratorV1{}, + generator = map[string]generate.Generator{ + ServiceNodePortGeneratorV1Name: ServiceNodePortGeneratorV1{}, } case "service-loadbalancer": - generator = map[string]kubectl.Generator{ - ServiceLoadBalancerGeneratorV1Name: kubectl.ServiceLoadBalancerGeneratorV1{}, + generator = map[string]generate.Generator{ + ServiceLoadBalancerGeneratorV1Name: ServiceLoadBalancerGeneratorV1{}, } case "deployment": // Create Deployment has only StructuredGenerators and no // param-based Generators. 
// The StructuredGenerators are as follows (as of 2018-03-16): - // DeploymentBasicV1Beta1GeneratorName -> kubectl.DeploymentBasicGeneratorV1 - // DeploymentBasicAppsV1Beta1GeneratorName -> kubectl.DeploymentBasicAppsGeneratorV1Beta1 - // DeploymentBasicAppsV1GeneratorName -> kubectl.DeploymentBasicAppsGeneratorV1 - generator = map[string]kubectl.Generator{} + // DeploymentBasicV1Beta1GeneratorName -> DeploymentBasicGeneratorV1 + // DeploymentBasicAppsV1Beta1GeneratorName -> DeploymentBasicAppsGeneratorV1Beta1 + // DeploymentBasicAppsV1GeneratorName -> DeploymentBasicAppsGeneratorV1 + generator = map[string]generate.Generator{} case "run": - generator = map[string]kubectl.Generator{ - RunV1GeneratorName: kubectl.BasicReplicationController{}, - RunPodV1GeneratorName: kubectl.BasicPod{}, - DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{}, - DeploymentAppsV1Beta1GeneratorName: kubectl.DeploymentAppsV1Beta1{}, - JobV1GeneratorName: kubectl.JobV1{}, - CronJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{}, - CronJobV1Beta1GeneratorName: kubectl.CronJobV1Beta1{}, + generator = map[string]generate.Generator{ + RunV1GeneratorName: BasicReplicationController{}, + RunPodV1GeneratorName: BasicPod{}, + DeploymentV1Beta1GeneratorName: DeploymentV1Beta1{}, + DeploymentAppsV1Beta1GeneratorName: DeploymentAppsV1Beta1{}, + DeploymentAppsV1GeneratorName: DeploymentAppsV1{}, + JobV1GeneratorName: JobV1{}, + CronJobV2Alpha1GeneratorName: CronJobV2Alpha1{}, + CronJobV1Beta1GeneratorName: CronJobV1Beta1{}, } case "namespace": - generator = map[string]kubectl.Generator{ - NamespaceV1GeneratorName: kubectl.NamespaceGeneratorV1{}, + generator = map[string]generate.Generator{ + NamespaceV1GeneratorName: NamespaceGeneratorV1{}, } case "quota": - generator = map[string]kubectl.Generator{ - ResourceQuotaV1GeneratorName: kubectl.ResourceQuotaGeneratorV1{}, + generator = map[string]generate.Generator{ + ResourceQuotaV1GeneratorName: ResourceQuotaGeneratorV1{}, } case "secret": - generator = map[string]kubectl.Generator{ - SecretV1GeneratorName: kubectl.SecretGeneratorV1{}, + generator = map[string]generate.Generator{ + SecretV1GeneratorName: SecretGeneratorV1{}, } case "secret-for-docker-registry": - generator = map[string]kubectl.Generator{ - SecretForDockerRegistryV1GeneratorName: kubectl.SecretForDockerRegistryGeneratorV1{}, + generator = map[string]generate.Generator{ + SecretForDockerRegistryV1GeneratorName: SecretForDockerRegistryGeneratorV1{}, } case "secret-for-tls": - generator = map[string]kubectl.Generator{ - SecretForTLSV1GeneratorName: kubectl.SecretForTLSGeneratorV1{}, + generator = map[string]generate.Generator{ + SecretForTLSV1GeneratorName: SecretForTLSGeneratorV1{}, } } @@ -149,6 +148,14 @@ func FallbackGeneratorNameIfNecessary( cmdErr io.Writer, ) (string, error) { switch generatorName { + case DeploymentAppsV1GeneratorName: + hasResource, err := HasResource(discoveryClient, appsv1.SchemeGroupVersion.WithResource("deployments")) + if err != nil { + return "", err + } + if !hasResource { + return FallbackGeneratorNameIfNecessary(DeploymentAppsV1Beta1GeneratorName, discoveryClient, cmdErr) + } case DeploymentAppsV1Beta1GeneratorName: hasResource, err := HasResource(discoveryClient, appsv1beta1.SchemeGroupVersion.WithResource("deployments")) if err != nil { @@ -226,12 +233,3 @@ func HasResource(client discovery.DiscoveryInterface, resource schema.GroupVersi } return false, nil } - -func Warning(cmdErr io.Writer, newGeneratorName, oldGeneratorName string) { - fmt.Fprintf(cmdErr, "WARNING: New 
generator %q specified, "+ - "but it isn't available. "+ - "Falling back to %q.\n", - newGeneratorName, - oldGeneratorName, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/namespace.go similarity index 84% rename from vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/namespace.go index 03723cdaa84f8..8921764db62a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/namespace.go @@ -14,13 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "fmt" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubectl/generate" ) // NamespaceGeneratorV1 supports stable generation of a namespace @@ -30,14 +31,14 @@ type NamespaceGeneratorV1 struct { } // Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &NamespaceGeneratorV1{} +var _ generate.Generator = &NamespaceGeneratorV1{} // Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &NamespaceGeneratorV1{} +var _ generate.StructuredGenerator = &NamespaceGeneratorV1{} // Generate returns a namespace using the specified parameters func (g NamespaceGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(g.ParamNames(), genericParams) + err := generate.ValidateParams(g.ParamNames(), genericParams) if err != nil { return nil, err } @@ -54,9 +55,9 @@ func (g NamespaceGeneratorV1) Generate(genericParams map[string]interface{}) (ru } // ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (g NamespaceGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, +func (g NamespaceGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/pdb.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/pdb.go similarity index 87% rename from vendor/k8s.io/kubernetes/pkg/kubectl/pdb.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/pdb.go index df2cc8f53b33c..a8cc6469e1156 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/pdb.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/pdb.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "fmt" @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/kubernetes/pkg/kubectl/generate" ) // PodDisruptionBudgetV1Generator supports stable generation of a pod disruption budget. @@ -33,18 +34,18 @@ type PodDisruptionBudgetV1Generator struct { } // Ensure it supports the generator pattern that uses parameters specified during construction. 
-var _ StructuredGenerator = &PodDisruptionBudgetV1Generator{} +var _ generate.StructuredGenerator = &PodDisruptionBudgetV1Generator{} -func (PodDisruptionBudgetV1Generator) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"min-available", false}, - {"selector", true}, +func (PodDisruptionBudgetV1Generator) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "min-available", Required: false}, + {Name: "selector", Required: true}, } } func (s PodDisruptionBudgetV1Generator) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) + err := generate.ValidateParams(s.ParamNames(), params) if err != nil { return nil, err } @@ -115,19 +116,19 @@ type PodDisruptionBudgetV2Generator struct { } // Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &PodDisruptionBudgetV2Generator{} +var _ generate.StructuredGenerator = &PodDisruptionBudgetV2Generator{} -func (PodDisruptionBudgetV2Generator) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"min-available", false}, - {"max-unavailable", false}, - {"selector", true}, +func (PodDisruptionBudgetV2Generator) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "min-available", Required: false}, + {Name: "max-unavailable", Required: false}, + {Name: "selector", Required: true}, } } func (s PodDisruptionBudgetV2Generator) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) + err := generate.ValidateParams(s.ParamNames(), params) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/priorityclass.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/priorityclass.go similarity index 82% rename from vendor/k8s.io/kubernetes/pkg/kubectl/priorityclass.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/priorityclass.go index 51c71a5701891..bd10d47181bfc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/priorityclass.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/priorityclass.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "fmt" @@ -22,6 +22,7 @@ import ( scheduling "k8s.io/api/scheduling/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubectl/generate" ) // PriorityClassV1Generator supports stable generation of a priorityClass. @@ -33,19 +34,19 @@ type PriorityClassV1Generator struct { } // Ensure it supports the generator pattern that uses parameters specified during construction. 
-var _ StructuredGenerator = &PriorityClassV1Generator{} +var _ generate.StructuredGenerator = &PriorityClassV1Generator{} -func (PriorityClassV1Generator) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"value", true}, - {"global-default", false}, - {"description", false}, +func (PriorityClassV1Generator) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "value", Required: true}, + {Name: "global-default", Required: false}, + {Name: "description", Required: false}, } } func (s PriorityClassV1Generator) Generate(params map[string]interface{}) (runtime.Object, error) { - if err := ValidateParams(s.ParamNames(), params); err != nil { + if err := generate.ValidateParams(s.ParamNames(), params); err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/quota.go similarity index 87% rename from vendor/k8s.io/kubernetes/pkg/kubectl/quota.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/quota.go index d51b74af951c0..c3710acb3b479 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/quota.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "fmt" @@ -22,6 +22,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubectl/generate" ) // ResourceQuotaGeneratorV1 supports stable generation of a resource quota @@ -37,22 +38,22 @@ type ResourceQuotaGeneratorV1 struct { } // ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (g ResourceQuotaGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"hard", true}, - {"scopes", false}, +func (g ResourceQuotaGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "hard", Required: true}, + {Name: "scopes", Required: false}, } } // Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &ResourceQuotaGeneratorV1{} +var _ generate.Generator = &ResourceQuotaGeneratorV1{} // Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &ResourceQuotaGeneratorV1{} +var _ generate.StructuredGenerator = &ResourceQuotaGeneratorV1{} func (g ResourceQuotaGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(g.ParamNames(), genericParams) + err := generate.ValidateParams(g.ParamNames(), genericParams) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rolebinding.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/rolebinding.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/kubectl/rolebinding.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/rolebinding.go index 0eddb1e0342c7..ed91208855b88 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rolebinding.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/rolebinding.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kubectl +package versioned import ( "fmt" @@ -24,6 +24,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/kubectl/generate" ) // RoleBindingGeneratorV1 supports stable generation of a roleBinding. @@ -43,14 +44,14 @@ type RoleBindingGeneratorV1 struct { } // Ensure it supports the generator pattern that uses parameter injection. -var _ Generator = &RoleBindingGeneratorV1{} +var _ generate.Generator = &RoleBindingGeneratorV1{} // Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &RoleBindingGeneratorV1{} +var _ generate.StructuredGenerator = &RoleBindingGeneratorV1{} // Generate returns a roleBinding using the specified parameters. func (s RoleBindingGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) + err := generate.ValidateParams(s.ParamNames(), genericParams) if err != nil { return nil, err } @@ -97,14 +98,14 @@ func (s RoleBindingGeneratorV1) Generate(genericParams map[string]interface{}) ( } // ParamNames returns the set of supported input parameters when using the parameter injection generator pattern. -func (s RoleBindingGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"clusterrole", false}, - {"role", false}, - {"user", false}, - {"group", false}, - {"serviceaccount", false}, +func (s RoleBindingGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "clusterrole", Required: false}, + {Name: "role", Required: false}, + {Name: "user", Required: false}, + {Name: "group", Required: false}, + {Name: "serviceaccount", Required: false}, } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/run.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/run.go similarity index 68% rename from vendor/k8s.io/kubernetes/pkg/kubectl/run.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/run.go index 7fe1f2fe2b431..4557aef7779da 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/run.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/run.go @@ -14,13 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kubectl +package versioned import ( "fmt" "strconv" "strings" + appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" @@ -31,28 +32,29 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/kubernetes/pkg/kubectl/generate" ) type DeploymentV1Beta1 struct{} -func (DeploymentV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"replicas", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"serviceaccount", false}, +func (DeploymentV1Beta1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "labels", Required: false}, + {Name: "default-name", Required: false}, + {Name: "name", Required: true}, + {Name: "replicas", Required: true}, + {Name: "image", Required: true}, + {Name: "image-pull-policy", Required: false}, + {Name: "port", Required: false}, + {Name: "hostport", Required: false}, + {Name: "stdin", Required: false}, + {Name: "tty", Required: false}, + {Name: "command", Required: false}, + {Name: "args", Required: false}, + {Name: "env", Required: false}, + {Name: "requests", Required: false}, + {Name: "limits", Required: false}, + {Name: "serviceaccount", Required: false}, } } @@ -123,24 +125,24 @@ func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime type DeploymentAppsV1Beta1 struct{} -func (DeploymentAppsV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"replicas", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"serviceaccount", false}, +func (DeploymentAppsV1Beta1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "labels", Required: false}, + {Name: "default-name", Required: false}, + {Name: "name", Required: true}, + {Name: "replicas", Required: true}, + {Name: "image", Required: true}, + {Name: "image-pull-policy", Required: false}, + {Name: "port", Required: false}, + {Name: "hostport", Required: false}, + {Name: "stdin", Required: false}, + {Name: "tty", Required: false}, + {Name: "command", Required: false}, + {Name: "args", Required: false}, + {Name: "env", Required: false}, + {Name: "requests", Required: false}, + {Name: "limits", Required: false}, + {Name: "serviceaccount", Required: false}, } } @@ -209,13 +211,101 @@ func (DeploymentAppsV1Beta1) Generate(genericParams map[string]interface{}) (run return &deployment, nil } +type DeploymentAppsV1 struct{} + +func (DeploymentAppsV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "labels", Required: false}, + {Name: "default-name", Required: false}, + {Name: "name", Required: true}, + {Name: "replicas", Required: true}, + {Name: "image", Required: true}, + {Name: "image-pull-policy", Required: false}, + {Name: "port", Required: false}, + {Name: "hostport", Required: false}, + {Name: "stdin", Required: false}, + {Name: "tty", Required: false}, + {Name: "command", 
Required: false}, + {Name: "args", Required: false}, + {Name: "env", Required: false}, + {Name: "requests", Required: false}, + {Name: "limits", Required: false}, + {Name: "serviceaccount", Required: false}, + } +} + +func (DeploymentAppsV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + args, err := getArgs(genericParams) + if err != nil { + return nil, err + } + + envs, err := getEnvs(genericParams) + if err != nil { + return nil, err + } + + params, err := getParams(genericParams) + if err != nil { + return nil, err + } + + name, err := getName(params) + if err != nil { + return nil, err + } + + labels, err := getLabels(params, name) + if err != nil { + return nil, err + } + + count, err := strconv.Atoi(params["replicas"]) + if err != nil { + return nil, err + } + + podSpec, err := makePodSpec(params, name) + if err != nil { + return nil, err + } + + imagePullPolicy := v1.PullPolicy(params["image-pull-policy"]) + if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { + return nil, err + } + + if err := updatePodPorts(params, podSpec); err != nil { + return nil, err + } + + count32 := int32(count) + deployment := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &count32, + Selector: &metav1.LabelSelector{MatchLabels: labels}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: *podSpec, + }, + }, + } + return &deployment, nil +} + // getLabels returns map of labels. func getLabels(params map[string]string, name string) (map[string]string, error) { labelString, found := params["labels"] var labels map[string]string var err error if found && len(labelString) > 0 { - labels, err = ParseLabels(labelString) + labels, err = generate.ParseLabels(labelString) if err != nil { return nil, err } @@ -288,25 +378,25 @@ func getEnvs(genericParams map[string]interface{}) ([]v1.EnvVar, error) { type JobV1 struct{} -func (JobV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"restart", false}, - {"serviceaccount", false}, +func (JobV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "labels", Required: false}, + {Name: "default-name", Required: false}, + {Name: "name", Required: true}, + {Name: "image", Required: true}, + {Name: "image-pull-policy", Required: false}, + {Name: "port", Required: false}, + {Name: "hostport", Required: false}, + {Name: "stdin", Required: false}, + {Name: "leave-stdin-open", Required: false}, + {Name: "tty", Required: false}, + {Name: "command", Required: false}, + {Name: "args", Required: false}, + {Name: "env", Required: false}, + {Name: "requests", Required: false}, + {Name: "limits", Required: false}, + {Name: "restart", Required: false}, + {Name: "serviceaccount", Required: false}, } } @@ -346,7 +436,7 @@ func (JobV1) Generate(genericParams map[string]interface{}) (runtime.Object, err return nil, err } - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) + leaveStdinOpen, err := generate.GetBool(params, "leave-stdin-open", false) if err != nil { return nil, err } @@ -382,26 +472,26 @@ func 
(JobV1) Generate(genericParams map[string]interface{}) (runtime.Object, err type CronJobV2Alpha1 struct{} -func (CronJobV2Alpha1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"restart", false}, - {"schedule", true}, - {"serviceaccount", false}, +func (CronJobV2Alpha1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "labels", Required: false}, + {Name: "default-name", Required: false}, + {Name: "name", Required: true}, + {Name: "image", Required: true}, + {Name: "image-pull-policy", Required: false}, + {Name: "port", Required: false}, + {Name: "hostport", Required: false}, + {Name: "stdin", Required: false}, + {Name: "leave-stdin-open", Required: false}, + {Name: "tty", Required: false}, + {Name: "command", Required: false}, + {Name: "args", Required: false}, + {Name: "env", Required: false}, + {Name: "requests", Required: false}, + {Name: "limits", Required: false}, + {Name: "restart", Required: false}, + {Name: "schedule", Required: true}, + {Name: "serviceaccount", Required: false}, } } @@ -441,7 +531,7 @@ func (CronJobV2Alpha1) Generate(genericParams map[string]interface{}) (runtime.O return nil, err } - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) + leaveStdinOpen, err := generate.GetBool(params, "leave-stdin-open", false) if err != nil { return nil, err } @@ -483,26 +573,26 @@ func (CronJobV2Alpha1) Generate(genericParams map[string]interface{}) (runtime.O type CronJobV1Beta1 struct{} -func (CronJobV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"restart", false}, - {"schedule", true}, - {"serviceaccount", false}, +func (CronJobV1Beta1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "labels", Required: false}, + {Name: "default-name", Required: false}, + {Name: "name", Required: true}, + {Name: "image", Required: true}, + {Name: "image-pull-policy", Required: false}, + {Name: "port", Required: false}, + {Name: "hostport", Required: false}, + {Name: "stdin", Required: false}, + {Name: "leave-stdin-open", Required: false}, + {Name: "tty", Required: false}, + {Name: "command", Required: false}, + {Name: "args", Required: false}, + {Name: "env", Required: false}, + {Name: "requests", Required: false}, + {Name: "limits", Required: false}, + {Name: "restart", Required: false}, + {Name: "schedule", Required: true}, + {Name: "serviceaccount", Required: false}, } } @@ -542,7 +632,7 @@ func (CronJobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Ob return nil, err } - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) + leaveStdinOpen, err := generate.GetBool(params, "leave-stdin-open", false) if err != nil { return nil, err } @@ -584,24 +674,24 @@ func (CronJobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Ob type BasicReplicationController struct{} -func 
(BasicReplicationController) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"replicas", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"serviceaccount", false}, +func (BasicReplicationController) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "labels", Required: false}, + {Name: "default-name", Required: false}, + {Name: "name", Required: true}, + {Name: "replicas", Required: true}, + {Name: "image", Required: true}, + {Name: "image-pull-policy", Required: false}, + {Name: "port", Required: false}, + {Name: "hostport", Required: false}, + {Name: "stdin", Required: false}, + {Name: "tty", Required: false}, + {Name: "command", Required: false}, + {Name: "args", Required: false}, + {Name: "env", Required: false}, + {Name: "requests", Required: false}, + {Name: "limits", Required: false}, + {Name: "serviceaccount", Required: false}, } } @@ -649,12 +739,12 @@ func HandleResourceRequirementsV1(params map[string]string) (v1.ResourceRequirem // makePodSpec returns PodSpec filled with passed parameters. func makePodSpec(params map[string]string, name string) (*v1.PodSpec, error) { - stdin, err := GetBool(params, "stdin", false) + stdin, err := generate.GetBool(params, "stdin", false) if err != nil { return nil, err } - tty, err := GetBool(params, "tty", false) + tty, err := generate.GetBool(params, "tty", false) if err != nil { return nil, err } @@ -747,7 +837,7 @@ func (BasicReplicationController) Generate(genericParams map[string]interface{}) // updatePodContainers updates PodSpec.Containers with passed parameters. 
func updatePodContainers(params map[string]string, args []string, envs []v1.EnvVar, imagePullPolicy v1.PullPolicy, podSpec *v1.PodSpec) error { if len(args) > 0 { - command, err := GetBool(params, "command", false) + command, err := generate.GetBool(params, "command", false) if err != nil { return err } @@ -806,25 +896,25 @@ func updatePodPorts(params map[string]string, podSpec *v1.PodSpec) (err error) { type BasicPod struct{} -func (BasicPod) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"restart", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"serviceaccount", false}, +func (BasicPod) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "labels", Required: false}, + {Name: "default-name", Required: false}, + {Name: "name", Required: true}, + {Name: "image", Required: true}, + {Name: "image-pull-policy", Required: false}, + {Name: "port", Required: false}, + {Name: "hostport", Required: false}, + {Name: "stdin", Required: false}, + {Name: "leave-stdin-open", Required: false}, + {Name: "tty", Required: false}, + {Name: "restart", Required: false}, + {Name: "command", Required: false}, + {Name: "args", Required: false}, + {Name: "env", Required: false}, + {Name: "requests", Required: false}, + {Name: "limits", Required: false}, + {Name: "serviceaccount", Required: false}, } } @@ -854,16 +944,16 @@ func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, return nil, err } - stdin, err := GetBool(params, "stdin", false) + stdin, err := generate.GetBool(params, "stdin", false) if err != nil { return nil, err } - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) + leaveStdinOpen, err := generate.GetBool(params, "leave-stdin-open", false) if err != nil { return nil, err } - tty, err := GetBool(params, "tty", false) + tty, err := generate.GetBool(params, "tty", false) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/kubectl/secret.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret.go index 9a0ff5281d415..02b27519f4560 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kubectl +package versioned import ( "fmt" @@ -26,6 +26,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/kubernetes/pkg/kubectl/generate" "k8s.io/kubernetes/pkg/kubectl/util" "k8s.io/kubernetes/pkg/kubectl/util/hash" ) @@ -47,14 +48,14 @@ type SecretGeneratorV1 struct { } // Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretGeneratorV1{} +var _ generate.Generator = &SecretGeneratorV1{} // Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretGeneratorV1{} +var _ generate.StructuredGenerator = &SecretGeneratorV1{} // Generate returns a secret using the specified parameters func (s SecretGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) + err := generate.ValidateParams(s.ParamNames(), genericParams) if err != nil { return nil, err } @@ -112,15 +113,15 @@ func (s SecretGeneratorV1) Generate(genericParams map[string]interface{}) (runti } // ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"type", false}, - {"from-file", false}, - {"from-literal", false}, - {"from-env-file", false}, - {"force", false}, - {"append-hash", false}, +func (s SecretGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "type", Required: false}, + {Name: "from-file", Required: false}, + {Name: "from-literal", Required: false}, + {Name: "from-env-file", Required: false}, + {Name: "force", Required: false}, + {Name: "append-hash", Required: false}, } } @@ -204,7 +205,7 @@ func handleFromFileSources(secret *v1.Secret, fileSources []string) error { } if info.IsDir() { if strings.Contains(fileSource, "=") { - return fmt.Errorf("cannot give a key name for a directory path.") + return fmt.Errorf("cannot give a key name for a directory path") } fileList, err := ioutil.ReadDir(filePath) if err != nil { @@ -264,7 +265,7 @@ func addKeyFromLiteralToSecret(secret *v1.Secret, keyName string, data []byte) e } if _, entryExists := secret.Data[keyName]; entryExists { - return fmt.Errorf("cannot add key %s, another key by that name already exists: %v.", keyName, secret.Data) + return fmt.Errorf("cannot add key %s, another key by that name already exists: %v", keyName, secret.Data) } secret.Data[keyName] = data return nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret_for_docker_registry.go similarity index 66% rename from vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret_for_docker_registry.go index da95e6d458860..bae9f05aa1ab0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret_for_docker_registry.go @@ -14,15 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kubectl +package versioned import ( + "encoding/base64" "encoding/json" "fmt" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/credentialprovider" + "k8s.io/kubernetes/pkg/kubectl/generate" "k8s.io/kubernetes/pkg/kubectl/util/hash" ) @@ -45,14 +46,14 @@ type SecretForDockerRegistryGeneratorV1 struct { } // Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretForDockerRegistryGeneratorV1{} +var _ generate.Generator = &SecretForDockerRegistryGeneratorV1{} // Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretForDockerRegistryGeneratorV1{} +var _ generate.StructuredGenerator = &SecretForDockerRegistryGeneratorV1{} // Generate returns a secret using the specified parameters func (s SecretForDockerRegistryGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) + err := generate.ValidateParams(s.ParamNames(), genericParams) if err != nil { return nil, err } @@ -97,11 +98,11 @@ func (s SecretForDockerRegistryGeneratorV1) StructuredGenerate() (runtime.Object } } if len(s.FileSources) == 0 { - dockercfgJsonContent, err := handleDockerCfgJsonContent(s.Username, s.Password, s.Email, s.Server) + dockercfgJSONContent, err := handleDockerCfgJSONContent(s.Username, s.Password, s.Email, s.Server) if err != nil { return nil, err } - secret.Data[v1.DockerConfigJsonKey] = dockercfgJsonContent + secret.Data[v1.DockerConfigJsonKey] = dockercfgJSONContent } if s.AppendHash { h, err := hash.SecretHash(secret) @@ -114,15 +115,15 @@ func (s SecretForDockerRegistryGeneratorV1) StructuredGenerate() (runtime.Object } // ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretForDockerRegistryGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"from-file", false}, - {"docker-username", true}, - {"docker-email", false}, - {"docker-password", true}, - {"docker-server", true}, - {"append-hash", false}, +func (s SecretForDockerRegistryGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "from-file", Required: false}, + {Name: "docker-username", Required: true}, + {Name: "docker-email", Required: false}, + {Name: "docker-password", Required: true}, + {Name: "docker-server", Required: true}, + {Name: "append-hash", Required: false}, } } @@ -146,17 +147,43 @@ func (s SecretForDockerRegistryGeneratorV1) validate() error { return nil } -// handleDockerCfgJsonContent serializes a ~/.docker/config.json file -func handleDockerCfgJsonContent(username, password, email, server string) ([]byte, error) { - dockercfgAuth := credentialprovider.DockerConfigEntry{ +// handleDockerCfgJSONContent serializes a ~/.docker/config.json file +func handleDockerCfgJSONContent(username, password, email, server string) ([]byte, error) { + dockercfgAuth := DockerConfigEntry{ Username: username, Password: password, Email: email, + Auth: encodeDockerConfigFieldAuth(username, password), } - dockerCfgJson := credentialprovider.DockerConfigJson{ - Auths: map[string]credentialprovider.DockerConfigEntry{server: dockercfgAuth}, + dockerCfgJSON := DockerConfigJSON{ + Auths: map[string]DockerConfigEntry{server: dockercfgAuth}, } - return json.Marshal(dockerCfgJson) + return json.Marshal(dockerCfgJSON) +} + +func 
encodeDockerConfigFieldAuth(username, password string) string { + fieldValue := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(fieldValue)) +} + +// DockerConfigJSON represents a local docker auth config file +// for pulling images. +type DockerConfigJSON struct { + Auths DockerConfig `json:"auths"` + // +optional + HttpHeaders map[string]string `json:"HttpHeaders,omitempty"` +} + +// DockerConfig represents the config file used by the docker CLI. +// This config that represents the credentials that should be used +// when pulling images from specific image repositories. +type DockerConfig map[string]DockerConfigEntry + +type DockerConfigEntry struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Email string `json:"email,omitempty"` + Auth string `json:"auth,omitempty"` } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret_for_tls.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret_for_tls.go index 825e759086f5f..0733dab7c1e2d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret_for_tls.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "crypto/tls" @@ -23,6 +23,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubectl/generate" "k8s.io/kubernetes/pkg/kubectl/util/hash" ) @@ -39,14 +40,14 @@ type SecretForTLSGeneratorV1 struct { } // Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretForTLSGeneratorV1{} +var _ generate.Generator = &SecretForTLSGeneratorV1{} // Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretForTLSGeneratorV1{} +var _ generate.StructuredGenerator = &SecretForTLSGeneratorV1{} // Generate returns a secret using the specified parameters func (s SecretForTLSGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) + err := generate.ValidateParams(s.ParamNames(), genericParams) if err != nil { return nil, err } @@ -121,12 +122,12 @@ func readFile(file string) ([]byte, error) { } // ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretForTLSGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"key", true}, - {"cert", true}, - {"append-hash", false}, +func (s SecretForTLSGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "key", Required: true}, + {Name: "cert", Required: true}, + {Name: "append-hash", Required: false}, } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/service.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/service.go similarity index 81% rename from vendor/k8s.io/kubernetes/pkg/kubectl/service.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/service.go index 7f0e0d869f72c..39d084bf9ff83 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/service.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/service.go @@ 
-14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "fmt" @@ -25,58 +25,59 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/kubernetes/pkg/kubectl/generate" ) // The only difference between ServiceGeneratorV1 and V2 is that the service port is named "default" in V1, while it is left unnamed in V2. type ServiceGeneratorV1 struct{} -func (ServiceGeneratorV1) ParamNames() []GeneratorParam { +func (ServiceGeneratorV1) ParamNames() []generate.GeneratorParam { return paramNames() } func (ServiceGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { params["port-name"] = "default" - return generate(params) + return generateService(params) } type ServiceGeneratorV2 struct{} -func (ServiceGeneratorV2) ParamNames() []GeneratorParam { +func (ServiceGeneratorV2) ParamNames() []generate.GeneratorParam { return paramNames() } func (ServiceGeneratorV2) Generate(params map[string]interface{}) (runtime.Object, error) { - return generate(params) + return generateService(params) } -func paramNames() []GeneratorParam { - return []GeneratorParam{ - {"default-name", true}, - {"name", false}, - {"selector", true}, +func paramNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "default-name", Required: true}, + {Name: "name", Required: false}, + {Name: "selector", Required: true}, // port will be used if a user specifies --port OR the exposed object // has one port - {"port", false}, + {Name: "port", Required: false}, // ports will be used iff a user doesn't specify --port AND the // exposed object has multiple ports - {"ports", false}, - {"labels", false}, - {"external-ip", false}, - {"load-balancer-ip", false}, - {"type", false}, - {"protocol", false}, + {Name: "ports", Required: false}, + {Name: "labels", Required: false}, + {Name: "external-ip", Required: false}, + {Name: "load-balancer-ip", Required: false}, + {Name: "type", Required: false}, + {Name: "protocol", Required: false}, // protocols will be used to keep port-protocol mapping derived from // exposed object - {"protocols", false}, - {"container-port", false}, // alias of target-port - {"target-port", false}, - {"port-name", false}, - {"session-affinity", false}, - {"cluster-ip", false}, + {Name: "protocols", Required: false}, + {Name: "container-port", Required: false}, // alias of target-port + {Name: "target-port", Required: false}, + {Name: "port-name", Required: false}, + {Name: "session-affinity", Required: false}, + {Name: "cluster-ip", Required: false}, } } -func generate(genericParams map[string]interface{}) (runtime.Object, error) { +func generateService(genericParams map[string]interface{}) (runtime.Object, error) { params := map[string]string{} for key, value := range genericParams { strVal, isString := value.(string) @@ -87,9 +88,9 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { } selectorString, found := params["selector"] if !found || len(selectorString) == 0 { - return nil, fmt.Errorf("'selector' is a required parameter.") + return nil, fmt.Errorf("'selector' is a required parameter") } - selector, err := ParseLabels(selectorString) + selector, err := generate.ParseLabels(selectorString) if err != nil { return nil, err } @@ -97,7 +98,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { labelsString, found := 
params["labels"] var labels map[string]string if found && len(labelsString) > 0 { - labels, err = ParseLabels(labelsString) + labels, err = generate.ParseLabels(labelsString) if err != nil { return nil, err } @@ -107,7 +108,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { if !found || len(name) == 0 { name, found = params["default-name"] if !found || len(name) == 0 { - return nil, fmt.Errorf("'name' is a required parameter.") + return nil, fmt.Errorf("'name' is a required parameter") } } @@ -123,7 +124,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { protocolsString, found := params["protocols"] var portProtocolMap map[string]string if found && len(protocolsString) > 0 { - portProtocolMap, err = ParseProtocols(protocolsString) + portProtocolMap, err = generate.ParseProtocols(protocolsString) if err != nil { return nil, err } @@ -135,7 +136,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { if portString, found = params["ports"]; !found { portString, found = params["port"] if !found && !isHeadlessService { - return nil, fmt.Errorf("'ports' or 'port' is a required parameter.") + return nil, fmt.Errorf("'ports' or 'port' is a required parameter") } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/service_basic.go similarity index 85% rename from vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/service_basic.go index 1f31bd09a2a31..be1e6667f353a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/service_basic.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package kubectl +package versioned import ( "fmt" @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/kubernetes/pkg/kubectl/generate" ) type ServiceCommonGeneratorV1 struct { @@ -54,31 +55,31 @@ type ServiceExternalNameGeneratorV1 struct { ServiceCommonGeneratorV1 } -func (ServiceClusterIPGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"tcp", true}, - {"clusterip", false}, +func (ServiceClusterIPGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "tcp", Required: true}, + {Name: "clusterip", Required: false}, } } -func (ServiceNodePortGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"tcp", true}, - {"nodeport", true}, +func (ServiceNodePortGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "tcp", Required: true}, + {Name: "nodeport", Required: true}, } } -func (ServiceLoadBalancerGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"tcp", true}, +func (ServiceLoadBalancerGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "tcp", Required: true}, } } -func (ServiceExternalNameGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"externalname", true}, +func (ServiceExternalNameGeneratorV1) ParamNames() []generate.GeneratorParam { + return []generate.GeneratorParam{ + {Name: "name", Required: true}, + {Name: "externalname", Required: true}, } } @@ -138,7 +139,7 @@ func (s ServiceCommonGeneratorV1) GenerateCommon(params map[string]interface{}) } func (s ServiceLoadBalancerGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) + err := generate.ValidateParams(s.ParamNames(), params) if err != nil { return nil, err } @@ -151,7 +152,7 @@ func (s ServiceLoadBalancerGeneratorV1) Generate(params map[string]interface{}) } func (s ServiceNodePortGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) + err := generate.ValidateParams(s.ParamNames(), params) if err != nil { return nil, err } @@ -164,7 +165,7 @@ func (s ServiceNodePortGeneratorV1) Generate(params map[string]interface{}) (run } func (s ServiceClusterIPGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) + err := generate.ValidateParams(s.ParamNames(), params) if err != nil { return nil, err } @@ -177,7 +178,7 @@ func (s ServiceClusterIPGeneratorV1) Generate(params map[string]interface{}) (ru } func (s ServiceExternalNameGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) + err := generate.ValidateParams(s.ParamNames(), params) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/serviceaccount.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/serviceaccount.go index fa701e140a241..e69b88de18437 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/serviceaccount.go @@ -14,13 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package versioned import ( "fmt" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubectl/generate" ) // ServiceAccountGeneratorV1 supports stable generation of a service account @@ -30,7 +31,7 @@ type ServiceAccountGeneratorV1 struct { } // Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &ServiceAccountGeneratorV1{} +var _ generate.StructuredGenerator = &ServiceAccountGeneratorV1{} // StructuredGenerate outputs a service account object using the configured fields func (g *ServiceAccountGeneratorV1) StructuredGenerate() (runtime.Object, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/generated/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/generated/BUILD.bazel new file mode 100644 index 0000000000000..5e080fc044b7a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generated/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["bindata.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/generated", + importpath = "k8s.io/kubernetes/pkg/kubectl/generated", + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/generated/bindata.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generated/bindata.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/generated/bindata.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/generated/bindata.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go b/vendor/k8s.io/kubernetes/pkg/kubectl/history.go index 4fcd4e526d7e3..a1a4829aeb6a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/history.go @@ -23,7 +23,7 @@ import ( "text/tabwriter" appsv1 "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,12 +34,10 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes" clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - api "k8s.io/kubernetes/pkg/apis/core" - apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" kapps "k8s.io/kubernetes/pkg/kubectl/apps" + describe "k8s.io/kubernetes/pkg/kubectl/describe/versioned" + deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice" - printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" ) const ( @@ -116,7 +114,7 @@ func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision i allRSs = append(allRSs, newRS) } - historyInfo := make(map[int64]*v1.PodTemplateSpec) + historyInfo := make(map[int64]*corev1.PodTemplateSpec) for _, rs := range allRSs { v, err := deploymentutil.Revision(rs) if err != nil { @@ -166,14 +164,10 @@ func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision i }) } -func printTemplate(template *v1.PodTemplateSpec) (string, error) { +func printTemplate(template *corev1.PodTemplateSpec) (string, error) { buf := bytes.NewBuffer([]byte{}) - internalTemplate := &api.PodTemplateSpec{} - if err := 
apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(template, internalTemplate, nil); err != nil { - return "", fmt.Errorf("failed to convert podtemplate, %v", err) - } - w := printersinternal.NewPrefixWriter(buf) - printersinternal.DescribePodTemplate(internalTemplate, w) + w := describe.NewPrefixWriter(buf) + describe.DescribePodTemplate(template, w) return buf.String(), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/BUILD.bazel index 9da22215d027e..923edb14bb2c5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/BUILD.bazel @@ -18,7 +18,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/printers:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/printers:go_default_library", "//vendor/k8s.io/metrics/pkg/apis/metrics:go_default_library", "//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/metrics_client.go b/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/metrics_client.go index 7b7c13d0b88a6..73399b233b0fb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/metrics_client.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/metrics_client.go @@ -64,7 +64,7 @@ func NewHeapsterMetricsClient(svcClient corev1client.ServicesGetter, namespace, } } -func podMetricsUrl(namespace string, name string) (string, error) { +func podMetricsURL(namespace string, name string) (string, error) { if namespace == metav1.NamespaceAll { return fmt.Sprintf("%s/pods", metricsRoot), nil } @@ -83,7 +83,7 @@ func podMetricsUrl(namespace string, name string) (string, error) { return fmt.Sprintf("%s/namespaces/%s/pods/%s", metricsRoot, namespace, name), nil } -func nodeMetricsUrl(name string) (string, error) { +func nodeMetricsURL(name string) (string, error) { if len(name) > 0 { errs := validation.NameIsDNSSubdomain(name, false) if len(errs) > 0 { @@ -96,7 +96,7 @@ func nodeMetricsUrl(name string) (string, error) { func (cli *HeapsterMetricsClient) GetNodeMetrics(nodeName string, selector string) (*metricsapi.NodeMetricsList, error) { params := map[string]string{"labelSelector": selector} - path, err := nodeMetricsUrl(nodeName) + path, err := nodeMetricsURL(nodeName) if err != nil { return nil, err } @@ -130,7 +130,7 @@ func (cli *HeapsterMetricsClient) GetPodMetrics(namespace string, podName string if allNamespaces { namespace = metav1.NamespaceAll } - path, err := podMetricsUrl(namespace, podName) + path, err := podMetricsURL(namespace, podName) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/metrics_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/metrics_printer.go index 9e1f45b25c8ef..c02af149cd42b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/metrics_printer.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/metricsutil/metrics_printer.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/kubernetes/pkg/printers" + "k8s.io/kubernetes/pkg/kubectl/util/printers" metricsapi "k8s.io/metrics/pkg/apis/metrics" ) @@ -53,7 +53,7 @@ func NewTopCmdPrinter(out io.Writer) 
*TopCmdPrinter { return &TopCmdPrinter{out: out} } -func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metricsapi.NodeMetrics, availableResources map[string]v1.ResourceList) error { +func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metricsapi.NodeMetrics, availableResources map[string]v1.ResourceList, noHeaders bool) error { if len(metrics) == 0 { return nil } @@ -63,8 +63,9 @@ func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metricsapi.NodeMetrics, sort.Slice(metrics, func(i, j int) bool { return metrics[i].Name < metrics[j].Name }) - - printColumnNames(w, NodeColumns) + if !noHeaders { + printColumnNames(w, NodeColumns) + } var usage v1.ResourceList for _, m := range metrics { err := scheme.Scheme.Convert(&m.Usage, &usage, nil) @@ -86,18 +87,20 @@ func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metricsapi.NodeMetrics, return nil } -func (printer *TopCmdPrinter) PrintPodMetrics(metrics []metricsapi.PodMetrics, printContainers bool, withNamespace bool) error { +func (printer *TopCmdPrinter) PrintPodMetrics(metrics []metricsapi.PodMetrics, printContainers bool, withNamespace bool, noHeaders bool) error { if len(metrics) == 0 { return nil } w := printers.GetNewTabWriter(printer.out) defer w.Flush() - - if withNamespace { - printValue(w, NamespaceColumn) - } - if printContainers { - printValue(w, PodColumn) + if !noHeaders { + if withNamespace { + printValue(w, NamespaceColumn) + } + if printContainers { + printValue(w, PodColumn) + } + printColumnNames(w, PodColumns) } sort.Slice(metrics, func(i, j int) bool { @@ -106,8 +109,6 @@ func (printer *TopCmdPrinter) PrintPodMetrics(metrics []metricsapi.PodMetrics, p } return metrics[i].Name < metrics[j].Name }) - - printColumnNames(w, PodColumns) for _, m := range metrics { err := printSinglePodMetrics(w, &m, printContainers, withNamespace) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/BUILD.bazel index 7edcdf71fe382..d23344c41dc2c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/BUILD.bazel @@ -33,7 +33,6 @@ go_library( "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -43,14 +42,9 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/watch:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core/v1:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/controller:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generate:go_default_library", 
"//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/attachablepodforobject.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/attachablepodforobject.go index bb6147e25879a..52a4d053848d1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/attachablepodforobject.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/attachablepodforobject.go @@ -21,23 +21,16 @@ import ( "sort" "time" - "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/genericclioptions" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - api "k8s.io/kubernetes/pkg/apis/core" - apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/kubectl/util/podutils" ) // attachablePodForObject returns the pod to which to attach given an object. func attachablePodForObject(restClientGetter genericclioptions.RESTClientGetter, object runtime.Object, timeout time.Duration) (*corev1.Pod, error) { switch t := object.(type) { - case *api.Pod: - externalPod := &corev1.Pod{} - err := apiv1.Convert_core_Pod_To_v1_Pod(t, externalPod, nil) - return externalPod, err case *corev1.Pod: return t, nil } @@ -55,7 +48,7 @@ func attachablePodForObject(restClientGetter genericclioptions.RESTClientGetter, if err != nil { return nil, fmt.Errorf("cannot attach to %T: %v", object, err) } - sortBy := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + sortBy := func(pods []*corev1.Pod) sort.Interface { return sort.Reverse(podutils.ActivePods(pods)) } pod, _, err := GetFirstPod(clientset, namespace, selector.String(), timeout, sortBy) return pod, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/canbeautoscaled.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/canbeautoscaled.go index 6009f2b5733f1..c91e816f7d2fe 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/canbeautoscaled.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/canbeautoscaled.go @@ -19,16 +19,21 @@ package polymorphichelpers import ( "fmt" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/apis/apps" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) func canBeAutoscaled(kind schema.GroupKind) error { switch kind { - case api.Kind("ReplicationController"), extensions.Kind("ReplicaSet"), - extensions.Kind("Deployment"), apps.Kind("Deployment"), apps.Kind("ReplicaSet"): + case + corev1.SchemeGroupVersion.WithKind("ReplicationController").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("StatefulSet").GroupKind(), + extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(): // nothing to do here default: return fmt.Errorf("cannot autoscale a %v", kind) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/canbeexposed.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/canbeexposed.go index af4463fe999c5..b232ff853fe0e 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/canbeexposed.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/canbeexposed.go @@ -19,17 +19,23 @@ package polymorphichelpers import ( "fmt" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/apis/apps" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) // Check whether the kind of resources could be exposed func canBeExposed(kind schema.GroupKind) error { switch kind { - case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), - extensions.Kind("Deployment"), apps.Kind("Deployment"), extensions.Kind("ReplicaSet"), apps.Kind("ReplicaSet"): + case + corev1.SchemeGroupVersion.WithKind("ReplicationController").GroupKind(), + corev1.SchemeGroupVersion.WithKind("Service").GroupKind(), + corev1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(), + extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(): // nothing to do here default: return fmt.Errorf("cannot expose a %s", kind) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/helpers.go index f55ec48933634..27286e71d5e6d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/helpers.go @@ -26,7 +26,6 @@ import ( appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" batchv1 "k8s.io/api/batch/v1" - "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,22 +34,18 @@ import ( "k8s.io/apimachinery/pkg/watch" coreclient "k8s.io/client-go/kubernetes/typed/core/v1" watchtools "k8s.io/client-go/tools/watch" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/batch" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) // GetFirstPod returns a pod matching the namespace and label selector // and the number of all pods that match the label selector. 
-func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string, timeout time.Duration, sortBy func([]*v1.Pod) sort.Interface) (*v1.Pod, int, error) { +func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string, timeout time.Duration, sortBy func([]*corev1.Pod) sort.Interface) (*corev1.Pod, int, error) { options := metav1.ListOptions{LabelSelector: selector} podList, err := client.Pods(namespace).List(options) if err != nil { return nil, 0, err } - pods := []*v1.Pod{} + pods := []*corev1.Pod{} for i := range podList.Items { pod := podList.Items[i] pods = append(pods, &pod) @@ -78,7 +73,7 @@ func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string if err != nil { return nil, 0, err } - pod, ok := event.Object.(*v1.Pod) + pod, ok := event.Object.(*corev1.Pod) if !ok { return nil, 0, fmt.Errorf("%#v is not a pod event", event) } @@ -88,12 +83,6 @@ func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string // SelectorsForObject returns the pod label selector for a given object func SelectorsForObject(object runtime.Object) (namespace string, selector labels.Selector, err error) { switch t := object.(type) { - case *extensions.ReplicaSet: - namespace = t.Namespace - selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return "", nil, fmt.Errorf("invalid label selector: %v", err) - } case *extensionsv1beta1.ReplicaSet: namespace = t.Namespace selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) @@ -113,19 +102,10 @@ func SelectorsForObject(object runtime.Object) (namespace string, selector label return "", nil, fmt.Errorf("invalid label selector: %v", err) } - case *api.ReplicationController: - namespace = t.Namespace - selector = labels.SelectorFromSet(t.Spec.Selector) case *corev1.ReplicationController: namespace = t.Namespace selector = labels.SelectorFromSet(t.Spec.Selector) - case *apps.StatefulSet: - namespace = t.Namespace - selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return "", nil, fmt.Errorf("invalid label selector: %v", err) - } case *appsv1.StatefulSet: namespace = t.Namespace selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) @@ -145,12 +125,6 @@ func SelectorsForObject(object runtime.Object) (namespace string, selector label return "", nil, fmt.Errorf("invalid label selector: %v", err) } - case *extensions.DaemonSet: - namespace = t.Namespace - selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return "", nil, fmt.Errorf("invalid label selector: %v", err) - } case *extensionsv1beta1.DaemonSet: namespace = t.Namespace selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) @@ -170,12 +144,6 @@ func SelectorsForObject(object runtime.Object) (namespace string, selector label return "", nil, fmt.Errorf("invalid label selector: %v", err) } - case *extensions.Deployment: - namespace = t.Namespace - selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return "", nil, fmt.Errorf("invalid label selector: %v", err) - } case *extensionsv1beta1.Deployment: namespace = t.Namespace selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) @@ -201,12 +169,6 @@ func SelectorsForObject(object runtime.Object) (namespace string, selector label return "", nil, fmt.Errorf("invalid label selector: %v", err) } - case *batch.Job: - namespace = t.Namespace - selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return "", 
nil, fmt.Errorf("invalid label selector: %v", err) - } case *batchv1.Job: namespace = t.Namespace selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) @@ -214,12 +176,6 @@ func SelectorsForObject(object runtime.Object) (namespace string, selector label return "", nil, fmt.Errorf("invalid label selector: %v", err) } - case *api.Service: - namespace = t.Namespace - if t.Spec.Selector == nil || len(t.Spec.Selector) == 0 { - return "", nil, fmt.Errorf("invalid service '%s': Service is defined without a selector", t.Name) - } - selector = labels.SelectorFromSet(t.Spec.Selector) case *corev1.Service: namespace = t.Namespace if t.Spec.Selector == nil || len(t.Spec.Selector) == 0 { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/interface.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/interface.go index 348505d49d7b2..816bf91427cc3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/interface.go @@ -47,7 +47,7 @@ type HistoryViewerFunc func(restClientGetter genericclioptions.RESTClientGetter, var HistoryViewerFn HistoryViewerFunc = historyViewer // StatusViewerFunc is a function type that can tell you how to print rollout status -type StatusViewerFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (kubectl.StatusViewer, error) +type StatusViewerFunc func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) // StatusViewerFn gives a way to easily override the function for unit testing if needed var StatusViewerFn StatusViewerFunc = statusViewer diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/logsforobject.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/logsforobject.go index 0378b6e0cc9b9..85472a135eed7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/logsforobject.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/logsforobject.go @@ -29,8 +29,7 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" - coreinternal "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/kubectl/util/podutils" ) func logsForObject(restClientGetter genericclioptions.RESTClientGetter, object, options runtime.Object, timeout time.Duration, allContainers bool) ([]*rest.Request, error) { @@ -55,17 +54,6 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt } switch t := object.(type) { - case *coreinternal.PodList: - ret := []*rest.Request{} - for i := range t.Items { - currRet, err := logsForObjectWithClient(clientset, &t.Items[i], options, timeout, allContainers) - if err != nil { - return nil, err - } - ret = append(ret, currRet...) - } - return ret, nil - case *corev1.PodList: ret := []*rest.Request{} for i := range t.Items { @@ -77,34 +65,6 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt } return ret, nil - case *coreinternal.Pod: - // if allContainers is true, then we're going to locate all containers and then iterate through them. 
At that point, "allContainers" is false - if !allContainers { - return []*rest.Request{clientset.Pods(t.Namespace).GetLogs(t.Name, opts)}, nil - } - - ret := []*rest.Request{} - for _, c := range t.Spec.InitContainers { - currOpts := opts.DeepCopy() - currOpts.Container = c.Name - currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false) - if err != nil { - return nil, err - } - ret = append(ret, currRet...) - } - for _, c := range t.Spec.Containers { - currOpts := opts.DeepCopy() - currOpts.Container = c.Name - currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false) - if err != nil { - return nil, err - } - ret = append(ret, currRet...) - } - - return ret, nil - case *corev1.Pod: // if allContainers is true, then we're going to locate all containers and then iterate through them. At that point, "allContainers" is false if !allContainers { @@ -139,7 +99,7 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt return nil, fmt.Errorf("cannot get the logs from %T: %v", object, err) } - sortBy := func(pods []*v1.Pod) sort.Interface { return controller.ByLogging(pods) } + sortBy := func(pods []*v1.Pod) sort.Interface { return podutils.ByLogging(pods) } pod, numPods, err := GetFirstPod(clientset, namespace, selector.String(), timeout, sortBy) if err != nil { return nil, err diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject.go index 1be70191af34b..3548edf23908c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject.go @@ -25,9 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/generate" ) // mapBasedSelectorForObject returns the map-based selector associated with the provided object. 
If a @@ -36,99 +34,119 @@ import ( func mapBasedSelectorForObject(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { - case *api.ReplicationController: - return kubectl.MakeLabels(t.Spec.Selector), nil case *corev1.ReplicationController: - return kubectl.MakeLabels(t.Spec.Selector), nil + return generate.MakeLabels(t.Spec.Selector), nil - case *api.Pod: - if len(t.Labels) == 0 { - return "", fmt.Errorf("the pod has no labels and cannot be exposed") - } - return kubectl.MakeLabels(t.Labels), nil case *corev1.Pod: if len(t.Labels) == 0 { return "", fmt.Errorf("the pod has no labels and cannot be exposed") } - return kubectl.MakeLabels(t.Labels), nil + return generate.MakeLabels(t.Labels), nil - case *api.Service: - if t.Spec.Selector == nil { - return "", fmt.Errorf("the service has no pod selector set") - } - return kubectl.MakeLabels(t.Spec.Selector), nil case *corev1.Service: if t.Spec.Selector == nil { return "", fmt.Errorf("the service has no pod selector set") } - return kubectl.MakeLabels(t.Spec.Selector), nil + return generate.MakeLabels(t.Spec.Selector), nil - case *extensions.Deployment: - // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals - // operator, DoubleEquals operator and In operator with only one element in the set. - if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) - } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil case *extensionsv1beta1.Deployment: - // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals - // operator, DoubleEquals operator and In operator with only one element in the set. - if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + // "extensions" deployments use pod template labels if selector is not set. + var labels map[string]string + if t.Spec.Selector != nil { + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. + if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + labels = t.Spec.Selector.MatchLabels + } else { + labels = t.Spec.Template.Labels } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + if len(labels) == 0 { + return "", fmt.Errorf("the deployment has no labels or selectors and cannot be exposed") + } + return generate.MakeLabels(labels), nil + case *appsv1.Deployment: + // "apps" deployments must have the selector set. + if t.Spec.Selector == nil || len(t.Spec.Selector.MatchLabels) == 0 { + return "", fmt.Errorf("invalid deployment: no selectors, therefore cannot be exposed") + } // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. 
if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + return generate.MakeLabels(t.Spec.Selector.MatchLabels), nil + case *appsv1beta2.Deployment: - // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals - // operator, DoubleEquals operator and In operator with only one element in the set. - if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + // "apps" deployments must have the selector set. + if t.Spec.Selector == nil || len(t.Spec.Selector.MatchLabels) == 0 { + return "", fmt.Errorf("invalid deployment: no selectors, therefore cannot be exposed") } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil - case *appsv1beta1.Deployment: // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + return generate.MakeLabels(t.Spec.Selector.MatchLabels), nil - case *extensions.ReplicaSet: + case *appsv1beta1.Deployment: + // "apps" deployments must have the selector set. + if t.Spec.Selector == nil || len(t.Spec.Selector.MatchLabels) == 0 { + return "", fmt.Errorf("invalid deployment: no selectors, therefore cannot be exposed") + } // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + return generate.MakeLabels(t.Spec.Selector.MatchLabels), nil + case *extensionsv1beta1.ReplicaSet: - // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals - // operator, DoubleEquals operator and In operator with only one element in the set. - if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + // "extensions" replicasets use pod template labels if selector is not set. + var labels map[string]string + if t.Spec.Selector != nil { + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. + if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + labels = t.Spec.Selector.MatchLabels + } else { + labels = t.Spec.Template.Labels } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + if len(labels) == 0 { + return "", fmt.Errorf("the replica set has no labels or selectors and cannot be exposed") + } + return generate.MakeLabels(labels), nil + case *appsv1.ReplicaSet: + // "apps" replicasets must have the selector set. 
+ if t.Spec.Selector == nil || len(t.Spec.Selector.MatchLabels) == 0 { + return "", fmt.Errorf("invalid replicaset: no selectors, therefore cannot be exposed") + } // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + return generate.MakeLabels(t.Spec.Selector.MatchLabels), nil + case *appsv1beta2.ReplicaSet: + // "apps" replicasets must have the selector set. + if t.Spec.Selector == nil || len(t.Spec.Selector.MatchLabels) == 0 { + return "", fmt.Errorf("invalid replicaset: no selectors, therefore cannot be exposed") + } // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + return generate.MakeLabels(t.Spec.Selector.MatchLabels), nil default: return "", fmt.Errorf("cannot extract pod selector from %T", object) } + } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/objectpauser.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/objectpauser.go index 8d77ee6e639e2..61aebba36bfa5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/objectpauser.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/objectpauser.go @@ -24,23 +24,13 @@ import ( appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/kubectl/scheme" ) // Currently only supports Deployments. func defaultObjectPauser(obj runtime.Object) ([]byte, error) { switch obj := obj.(type) { - case *extensions.Deployment: - if obj.Spec.Paused { - return nil, errors.New("is already paused") - } - obj.Spec.Paused = true - return runtime.Encode(internalVersionJSONEncoder(), obj) - case *extensionsv1beta1.Deployment: if obj.Spec.Paused { return nil, errors.New("is already paused") @@ -73,8 +63,3 @@ func defaultObjectPauser(obj runtime.Object) ([]byte, error) { return nil, fmt.Errorf("pausing is not supported") } } - -func internalVersionJSONEncoder() runtime.Encoder { - encoder := legacyscheme.Codecs.LegacyCodec(legacyscheme.Scheme.PrioritizedVersionsAllGroups()...) 
- return unstructured.JSONFallbackEncoder{Encoder: encoder} -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/objectresumer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/objectresumer.go index d3119bc7b5c17..90e8b2049c61c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/objectresumer.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/objectresumer.go @@ -25,19 +25,11 @@ import ( appsv1beta2 "k8s.io/api/apps/v1beta2" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/kubectl/scheme" ) func defaultObjectResumer(obj runtime.Object) ([]byte, error) { switch obj := obj.(type) { - case *extensions.Deployment: - if !obj.Spec.Paused { - return nil, errors.New("is not paused") - } - obj.Spec.Paused = false - return runtime.Encode(internalVersionJSONEncoder(), obj) - case *extensionsv1beta1.Deployment: if !obj.Spec.Paused { return nil, errors.New("is not paused") diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/portsforobject.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/portsforobject.go index 26f00bf08525a..6cc9a2a4e5124 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/portsforobject.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/portsforobject.go @@ -26,29 +26,19 @@ import ( corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) func portsForObject(object runtime.Object) ([]string, error) { switch t := object.(type) { - case *api.ReplicationController: - return getPortsInternal(t.Spec.Template.Spec), nil case *corev1.ReplicationController: return getPorts(t.Spec.Template.Spec), nil - case *api.Pod: - return getPortsInternal(t.Spec), nil case *corev1.Pod: return getPorts(t.Spec), nil - case *api.Service: - return getServicePortsInternal(t.Spec), nil case *corev1.Service: return getServicePorts(t.Spec), nil - case *extensions.Deployment: - return getPortsInternal(t.Spec.Template.Spec), nil case *extensionsv1beta1.Deployment: return getPorts(t.Spec.Template.Spec), nil case *appsv1.Deployment: @@ -58,8 +48,6 @@ func portsForObject(object runtime.Object) ([]string, error) { case *appsv1beta1.Deployment: return getPorts(t.Spec.Template.Spec), nil - case *extensions.ReplicaSet: - return getPortsInternal(t.Spec.Template.Spec), nil case *extensionsv1beta1.ReplicaSet: return getPorts(t.Spec.Template.Spec), nil case *appsv1.ReplicaSet: @@ -71,25 +59,6 @@ func portsForObject(object runtime.Object) ([]string, error) { } } -func getPortsInternal(spec api.PodSpec) []string { - result := []string{} - for _, container := range spec.Containers { - for _, port := range container.Ports { - result = append(result, strconv.Itoa(int(port.ContainerPort))) - } - } - return result -} - -// Extracts the ports exposed by a service from the given service spec. 
-func getServicePortsInternal(spec api.ServiceSpec) []string { - result := []string{} - for _, servicePort := range spec.Ports { - result = append(result, strconv.Itoa(int(servicePort.Port))) - } - return result -} - func getPorts(spec corev1.PodSpec) []string { result := []string{} for _, container := range spec.Containers { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/protocolsforobject.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/protocolsforobject.go index 3ab54b28e4840..2e5e5a2086dd5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/protocolsforobject.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/protocolsforobject.go @@ -26,30 +26,20 @@ import ( corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) func protocolsForObject(object runtime.Object) (map[string]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { - case *api.ReplicationController: - return getProtocolsInternal(t.Spec.Template.Spec), nil case *corev1.ReplicationController: return getProtocols(t.Spec.Template.Spec), nil - case *api.Pod: - return getProtocolsInternal(t.Spec), nil case *corev1.Pod: return getProtocols(t.Spec), nil - case *api.Service: - return getServiceProtocolsInternal(t.Spec), nil case *corev1.Service: return getServiceProtocols(t.Spec), nil - case *extensions.Deployment: - return getProtocolsInternal(t.Spec.Template.Spec), nil case *extensionsv1beta1.Deployment: return getProtocols(t.Spec.Template.Spec), nil case *appsv1.Deployment: @@ -59,8 +49,6 @@ func protocolsForObject(object runtime.Object) (map[string]string, error) { case *appsv1beta1.Deployment: return getProtocols(t.Spec.Template.Spec), nil - case *extensions.ReplicaSet: - return getProtocolsInternal(t.Spec.Template.Spec), nil case *extensionsv1beta1.ReplicaSet: return getProtocols(t.Spec.Template.Spec), nil case *appsv1.ReplicaSet: @@ -73,29 +61,14 @@ func protocolsForObject(object runtime.Object) (map[string]string, error) { } } -func getProtocolsInternal(spec api.PodSpec) map[string]string { - result := make(map[string]string) - for _, container := range spec.Containers { - for _, port := range container.Ports { - result[strconv.Itoa(int(port.ContainerPort))] = string(port.Protocol) - } - } - return result -} - -// Extracts the protocols exposed by a service from the given service spec. 
-func getServiceProtocolsInternal(spec api.ServiceSpec) map[string]string { - result := make(map[string]string) - for _, servicePort := range spec.Ports { - result[strconv.Itoa(int(servicePort.Port))] = string(servicePort.Protocol) - } - return result -} - func getProtocols(spec corev1.PodSpec) map[string]string { result := make(map[string]string) for _, container := range spec.Containers { for _, port := range container.Ports { + // Empty protocol must be defaulted (TCP) + if len(port.Protocol) == 0 { + port.Protocol = corev1.ProtocolTCP + } result[strconv.Itoa(int(port.ContainerPort))] = string(port.Protocol) } } @@ -106,6 +79,10 @@ func getProtocols(spec corev1.PodSpec) map[string]string { func getServiceProtocols(spec corev1.ServiceSpec) map[string]string { result := make(map[string]string) for _, servicePort := range spec.Ports { + // Empty protocol must be defaulted (TCP) + if len(servicePort.Protocol) == 0 { + servicePort.Protocol = corev1.ProtocolTCP + } result[strconv.Itoa(int(servicePort.Port))] = string(servicePort.Protocol) } return result diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/statusviewer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/statusviewer.go index 740c561250604..08a402b5ec8b4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/statusviewer.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/polymorphichelpers/statusviewer.go @@ -18,20 +18,10 @@ package polymorphichelpers import ( "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/kubectl" ) // statusViewer returns a StatusViewer for printing rollout status. -func statusViewer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { - clientConfig, err := restClientGetter.ToRESTConfig() - if err != nil { - return nil, err - } - clientset, err := kubernetes.NewForConfig(clientConfig) - if err != nil { - return nil, err - } - return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) +func statusViewer(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { + return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind()) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/proxy/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/proxy/BUILD.bazel index 14dad7c0f2fc4..c6dba2b02eda7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/proxy/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/proxy/BUILD.bazel @@ -7,11 +7,11 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubectl/proxy", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/proxy:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/transport:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubectl/util:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/proxy/proxy_server.go b/vendor/k8s.io/kubernetes/pkg/kubectl/proxy/proxy_server.go index e60ebf0b58a34..85fe8fc8fd83c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/proxy/proxy_server.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/proxy/proxy_server.go @@ -26,11 +26,11 @@ import ( "strings" "time" - "github.com/golang/glog" utilnet "k8s.io/apimachinery/pkg/util/net" 
"k8s.io/apimachinery/pkg/util/proxy" "k8s.io/client-go/rest" "k8s.io/client-go/transport" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubectl/util" ) @@ -87,7 +87,7 @@ func MakeRegexpArray(str string) ([]*regexp.Regexp, error) { func MakeRegexpArrayOrDie(str string) []*regexp.Regexp { result, err := MakeRegexpArray(str) if err != nil { - glog.Fatalf("Error compiling re: %v", err) + klog.Fatalf("Error compiling re: %v", err) } return result } @@ -95,7 +95,7 @@ func MakeRegexpArrayOrDie(str string) []*regexp.Regexp { func matchesRegexp(str string, regexps []*regexp.Regexp) bool { for _, re := range regexps { if re.MatchString(str) { - glog.V(6).Infof("%v matched %s", str, re) + klog.V(6).Infof("%v matched %s", str, re) return true } } @@ -135,13 +135,12 @@ func extractHost(header string) (host string) { func (f *FilterServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) { host := extractHost(req.Host) if f.accept(req.Method, req.URL.Path, host) { - glog.V(3).Infof("Filter accepting %v %v %v", req.Method, req.URL.Path, host) + klog.V(3).Infof("Filter accepting %v %v %v", req.Method, req.URL.Path, host) f.delegate.ServeHTTP(rw, req) return } - glog.V(3).Infof("Filter rejecting %v %v %v", req.Method, req.URL.Path, host) - rw.WriteHeader(http.StatusForbidden) - rw.Write([]byte("
<h3>Unauthorized</h3>
")) + klog.V(3).Infof("Filter rejecting %v %v %v", req.Method, req.URL.Path, host) + http.Error(rw, http.StatusText(http.StatusForbidden), http.StatusForbidden) } // Server is a http.Handler which proxies Kubernetes APIs to remote API server. @@ -152,7 +151,7 @@ type Server struct { type responder struct{} func (r *responder) Error(w http.ResponseWriter, req *http.Request, err error) { - glog.Errorf("Error while proxying request: %v", err) + klog.Errorf("Error while proxying request: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go index 027abadc4f464..8851d5741051d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go @@ -19,14 +19,11 @@ package kubectl import ( "bytes" "fmt" - "os" - "os/signal" "sort" - "syscall" appsv1 "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -34,17 +31,10 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api/legacyscheme" - api "k8s.io/kubernetes/pkg/apis/core" - apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/apis/extensions" kapps "k8s.io/kubernetes/pkg/kubectl/apps" - sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice" - printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" - // kubectl should not be taking dependencies on logic in the controllers - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" + "k8s.io/kubernetes/pkg/kubectl/scheme" + deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" ) const ( @@ -105,144 +95,159 @@ type DeploymentRollbacker struct { } func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) { - d, ok := obj.(*extensions.Deployment) - if !ok { - return "", fmt.Errorf("passed object is not a Deployment: %#v", obj) + if toRevision < 0 { + return "", revisionNotFoundErr(toRevision) + } + accessor, err := meta.Accessor(obj) + if err != nil { + return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) + } + name := accessor.GetName() + namespace := accessor.GetNamespace() + + // TODO: Fix this after kubectl has been removed from core. It is not possible to convert the runtime.Object + // to the external appsv1 Deployment without round-tripping through an internal version of Deployment. We're + // currently getting rid of all internal versions of resources. So we specifically request the appsv1 version + // here. This follows the same pattern as for DaemonSet and StatefulSet. 
+ deployment, err := r.c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("failed to retrieve Deployment %s: %v", name, err) + } + + rsForRevision, err := deploymentRevision(deployment, r.c, toRevision) + if err != nil { + return "", err } if dryRun { - return simpleDryRun(d, r.c, toRevision) + return printTemplate(&rsForRevision.Spec.Template) } - if d.Spec.Paused { - return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", d.Name) + if deployment.Spec.Paused { + return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", name) } - deploymentRollback := &extensionsv1beta1.DeploymentRollback{ - Name: d.Name, - UpdatedAnnotations: updatedAnnotations, - RollbackTo: extensionsv1beta1.RollbackConfig{ - Revision: toRevision, - }, + + // Skip if the revision already matches current Deployment + if equalIgnoreHash(&rsForRevision.Spec.Template, &deployment.Spec.Template) { + return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil } - result := "" - // Get current events - events, err := r.c.CoreV1().Events(d.Namespace).List(metav1.ListOptions{}) - if err != nil { - return result, err + // remove hash label before patching back into the deployment + delete(rsForRevision.Spec.Template.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + + // compute deployment annotations + annotations := map[string]string{} + for k := range annotationsToSkip { + if v, ok := deployment.Annotations[k]; ok { + annotations[k] = v + } } - // Do the rollback - if err := r.c.ExtensionsV1beta1().Deployments(d.Namespace).Rollback(deploymentRollback); err != nil { - return result, err + for k, v := range rsForRevision.Annotations { + if !annotationsToSkip[k] { + annotations[k] = v + } } - // Watch for the changes of events - watch, err := r.c.CoreV1().Events(d.Namespace).Watch(metav1.ListOptions{Watch: true, ResourceVersion: events.ResourceVersion}) + + // make patch to restore + patchType, patch, err := getDeploymentPatch(&rsForRevision.Spec.Template, annotations) if err != nil { - return result, err + return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } - result = watchRollbackEvent(watch) - return result, err -} -// watchRollbackEvent watches for rollback events and returns rollback result -func watchRollbackEvent(w watch.Interface) string { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt, os.Kill, syscall.SIGTERM) - for { - select { - case event, ok := <-w.ResultChan(): - if !ok { - return "" - } - obj, ok := event.Object.(*api.Event) - if !ok { - w.Stop() - return "" - } - isRollback, result := isRollbackEvent(obj) - if isRollback { - w.Stop() - return result - } - case <-signals: - w.Stop() - } + // Restore revision + if _, err = r.c.AppsV1().Deployments(namespace).Patch(name, patchType, patch); err != nil { + return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } + return rollbackSuccess, nil } -// isRollbackEvent checks if the input event is about rollback, and returns true and -// related result string back if it is. 
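For context, the rewritten rollback path above no longer watches rollback events or round-trips through internal API types; it re-fetches the apps/v1 Deployment and patches it from the chosen ReplicaSet's template. A minimal caller-side sketch follows (illustrative only, not part of the patch; it assumes the RollbackerFor factory defined earlier in this file, outside this hunk, and uses placeholder names such as cfg, namespace, and name):

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/kubectl"
)

// rollbackDeployment rolls the named Deployment back to toRevision
// (0 selects the previous revision), mirroring the signature shown in this hunk.
func rollbackDeployment(cfg *rest.Config, namespace, name string, toRevision int64) (string, error) {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return "", err
	}
	// The rollbacker only needs an object carrying name and namespace;
	// the patched implementation re-fetches the apps/v1 Deployment itself.
	obj := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
	r, err := kubectl.RollbackerFor(appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), clientset)
	if err != nil {
		return "", err
	}
	return r.Rollback(obj, nil, toRevision, false)
}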
-func isRollbackEvent(e *api.Event) (bool, string) { - rollbackEventReasons := []string{deploymentutil.RollbackRevisionNotFound, deploymentutil.RollbackTemplateUnchanged, deploymentutil.RollbackDone} - for _, reason := range rollbackEventReasons { - if e.Reason == reason { - if reason == deploymentutil.RollbackDone { - return true, rollbackSuccess - } - return true, fmt.Sprintf("%s (%s: %s)", rollbackSkipped, e.Reason, e.Message) - } - } - return false, "" +// equalIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] +// We ignore pod-template-hash because: +// 1. The hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// 2. The deployment template won't have hash labels +func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool { + t1Copy := template1.DeepCopy() + t2Copy := template2.DeepCopy() + // Remove hash labels from template.Labels before comparing + delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) } -func simpleDryRun(deployment *extensions.Deployment, c kubernetes.Interface, toRevision int64) (string, error) { - externalDeployment := &appsv1.Deployment{} - if err := legacyscheme.Scheme.Convert(deployment, externalDeployment, nil); err != nil { - return "", fmt.Errorf("failed to convert deployment, %v", err) - } +// annotationsToSkip lists the annotations that should be preserved from the deployment and not +// copied from the replicaset when rolling a deployment back +var annotationsToSkip = map[string]bool{ + corev1.LastAppliedConfigAnnotation: true, + deploymentutil.RevisionAnnotation: true, + deploymentutil.RevisionHistoryAnnotation: true, + deploymentutil.DesiredReplicasAnnotation: true, + deploymentutil.MaxReplicasAnnotation: true, + appsv1.DeprecatedRollbackTo: true, +} + +// getPatch returns a patch that can be applied to restore a Deployment to a +// previous version. If the returned error is nil the patch is valid. 
+func getDeploymentPatch(podTemplate *corev1.PodTemplateSpec, annotations map[string]string) (types.PatchType, []byte, error) { + // Create a patch of the Deployment that replaces spec.template + patch, err := json.Marshal([]interface{}{ + map[string]interface{}{ + "op": "replace", + "path": "/spec/template", + "value": podTemplate, + }, + map[string]interface{}{ + "op": "replace", + "path": "/metadata/annotations", + "value": annotations, + }, + }) + return types.JSONPatchType, patch, err +} - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(externalDeployment, c.AppsV1()) +func deploymentRevision(deployment *appsv1.Deployment, c kubernetes.Interface, toRevision int64) (revision *appsv1.ReplicaSet, err error) { + + _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1()) if err != nil { - return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err) + return nil, fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err) } allRSs := allOldRSs if newRS != nil { allRSs = append(allRSs, newRS) } - revisionToSpec := make(map[int64]*v1.PodTemplateSpec) + var ( + latestReplicaSet *appsv1.ReplicaSet + latestRevision = int64(-1) + previousReplicaSet *appsv1.ReplicaSet + previousRevision = int64(-1) + ) for _, rs := range allRSs { - v, err := deploymentutil.Revision(rs) - if err != nil { - continue + if v, err := deploymentutil.Revision(rs); err == nil { + if toRevision == 0 { + if latestRevision < v { + // newest one we've seen so far + previousRevision = latestRevision + previousReplicaSet = latestReplicaSet + latestRevision = v + latestReplicaSet = rs + } else if previousRevision < v { + // second newest one we've seen so far + previousRevision = v + previousReplicaSet = rs + } + } else if toRevision == v { + return rs, nil + } } - revisionToSpec[v] = &rs.Spec.Template - } - - if len(revisionToSpec) < 2 { - return "", fmt.Errorf("no rollout history found for deployment %q", deployment.Name) } if toRevision > 0 { - template, ok := revisionToSpec[toRevision] - if !ok { - return "", revisionNotFoundErr(toRevision) - } - buf := bytes.NewBuffer([]byte{}) - internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(template, internalTemplate, nil); err != nil { - return "", fmt.Errorf("failed to convert podtemplate, %v", err) - } - w := printersinternal.NewPrefixWriter(buf) - printersinternal.DescribePodTemplate(internalTemplate, w) - return buf.String(), nil - } - - // Sort the revisionToSpec map by revision - revisions := make([]int64, 0, len(revisionToSpec)) - for r := range revisionToSpec { - revisions = append(revisions, r) + return nil, revisionNotFoundErr(toRevision) } - sliceutil.SortInts64(revisions) - template, _ := revisionToSpec[revisions[len(revisions)-2]] - buf := bytes.NewBuffer([]byte{}) - buf.WriteString("\n") - internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(template, internalTemplate, nil); err != nil { - return "", fmt.Errorf("failed to convert podtemplate, %v", err) + if previousReplicaSet == nil { + return nil, fmt.Errorf("no rollout history found for deployment %q", deployment.Name) } - w := printersinternal.NewPrefixWriter(buf) - printersinternal.DescribePodTemplate(internalTemplate, w) - return buf.String(), nil + return previousReplicaSet, nil } type DaemonSetRollbacker struct { @@ -382,7 +387,7 @@ func (r *StatefulSetRollbacker) Rollback(obj 
runtime.Object, updatedAnnotations return rollbackSuccess, nil } -var appsCodec = legacyscheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion) +var appsCodec = scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion) // applyRevision returns a new StatefulSet constructed by restoring the state in revision to set. If the returned error // is nil, the returned StatefulSet is valid. @@ -459,15 +464,12 @@ func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *app } // printPodTemplate converts a given pod template into a human-readable string. -func printPodTemplate(specTemplate *v1.PodTemplateSpec) (string, error) { - content := bytes.NewBuffer([]byte{}) - w := printersinternal.NewPrefixWriter(content) - internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(specTemplate, internalTemplate, nil); err != nil { - return "", fmt.Errorf("failed to convert podtemplate while printing: %v", err) - } - printersinternal.DescribePodTemplate(internalTemplate, w) - return fmt.Sprintf("will roll back to %s", content.String()), nil +func printPodTemplate(specTemplate *corev1.PodTemplateSpec) (string, error) { + podSpec, err := printTemplate(specTemplate) + if err != nil { + return "", err + } + return fmt.Sprintf("will roll back to %s", podSpec), nil } func revisionNotFoundErr(r int64) error { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go index 1c750614bba74..b6df08c412073 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go @@ -35,9 +35,9 @@ import ( scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/util/integer" "k8s.io/client-go/util/retry" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/kubectl/util" + deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" + "k8s.io/kubernetes/pkg/kubectl/util/podutils" ) func newInt32Ptr(val int) *int32 { @@ -54,7 +54,7 @@ func valOrZero(val *int32) int32 { const ( kubectlAnnotationPrefix = "kubectl.kubernetes.io/" - sourceIdAnnotation = kubectlAnnotationPrefix + "update-source-id" + sourceIDAnnotation = kubectlAnnotationPrefix + "update-source-id" desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas" originalReplicasAnnotation = kubectlAnnotationPrefix + "original-replicas" nextControllerAnnotation = kubectlAnnotationPrefix + "next-controller-id" @@ -135,7 +135,7 @@ type RollingUpdater struct { scaleAndWait func(rc *corev1.ReplicationController, retry *RetryParams, wait *RetryParams) (*corev1.ReplicationController, error) //getOrCreateTargetController gets and validates an existing controller or //makes a new one. - getOrCreateTargetController func(controller *corev1.ReplicationController, sourceId string) (*corev1.ReplicationController, bool, error) + getOrCreateTargetController func(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) // cleanup performs post deployment cleanup tasks for newRc and oldRc. cleanup func(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error // getReadyPods returns the amount of old and new ready pods. @@ -188,8 +188,8 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { // Find an existing controller (for continuing an interrupted update) or // create a new one if necessary. 
- sourceId := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID) - newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceId) + sourceID := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID) + newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceID) if err != nil { return err } @@ -443,7 +443,7 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *corev1.ReplicationController, m if v1Pod.DeletionTimestamp != nil { continue } - if !podutil.IsPodAvailable(&v1Pod, minReadySeconds, r.nowFn()) { + if !podutils.IsPodAvailable(&v1Pod, minReadySeconds, r.nowFn()) { continue } switch controller.Name { @@ -458,14 +458,14 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *corev1.ReplicationController, m } // getOrCreateTargetControllerWithClient looks for an existing controller with -// sourceId. If found, the existing controller is returned with true +// sourceID. If found, the existing controller is returned with true // indicating that the controller already exists. If the controller isn't // found, a new one is created and returned along with false indicating the // controller was created. // -// Existing controllers are validated to ensure their sourceIdAnnotation -// matches sourceId; if there's a mismatch, an error is returned. -func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev1.ReplicationController, sourceId string) (*corev1.ReplicationController, bool, error) { +// Existing controllers are validated to ensure their sourceIDAnnotation +// matches sourceID; if there's a mismatch, an error is returned. +func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) { existingRc, err := r.existingController(controller) if err != nil { if !errors.IsNotFound(err) { @@ -474,24 +474,24 @@ func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev return nil, false, err } if valOrZero(controller.Spec.Replicas) <= 0 { - return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", controller.Name, valOrZero(controller.Spec.Replicas)) + return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d", controller.Name, valOrZero(controller.Spec.Replicas)) } // The controller wasn't found, so create it. if controller.Annotations == nil { controller.Annotations = map[string]string{} } controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", valOrZero(controller.Spec.Replicas)) - controller.Annotations[sourceIdAnnotation] = sourceId + controller.Annotations[sourceIDAnnotation] = sourceID controller.Spec.Replicas = newInt32Ptr(0) newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(controller) return newRc, false, err } // Validate and use the existing controller. 
annotations := existingRc.Annotations - source := annotations[sourceIdAnnotation] + source := annotations[sourceIDAnnotation] _, ok := annotations[desiredReplicasAnnotation] - if source != sourceId || !ok { - return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceId, annotations) + if source != sourceID || !ok { + return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceID, annotations) } return existingRc, true, nil } @@ -517,7 +517,7 @@ func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *corev1.ReplicationCont return err } applyUpdate := func(rc *corev1.ReplicationController) { - delete(rc.Annotations, sourceIdAnnotation) + delete(rc.Annotations, sourceIDAnnotation) delete(rc.Annotations, desiredReplicasAnnotation) } if newRc, err = updateRcWithRetries(r.rcClient, r.ns, newRc, applyUpdate); err != nil { @@ -662,7 +662,7 @@ func AbortRollingUpdate(c *RollingUpdaterConfig) error { if c.NewRc.Annotations == nil { c.NewRc.Annotations = map[string]string{} } - c.NewRc.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", c.OldRc.Name, c.OldRc.UID) + c.NewRc.Annotations[sourceIDAnnotation] = fmt.Sprintf("%s:%s", c.OldRc.Name, c.OldRc.UID) // Use the original value since the replica count change from old to new // could be asymmetric. If we don't know the original count, we can't safely @@ -841,7 +841,7 @@ func FindSourceController(r corev1client.ReplicationControllersGetter, namespace } for ix := range list.Items { rc := &list.Items[ix] - if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIdAnnotation], name) { + if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIDAnnotation], name) { return rc, nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go index 184cb892494ad..932e905175eff 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go @@ -23,11 +23,8 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" - clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/kubectl/scheme" + deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" ) // StatusViewer provides an interface for resources that have rollout status. @@ -36,32 +33,28 @@ type StatusViewer interface { } // StatusViewerFor returns a StatusViewer for the resource specified by kind. 
-func StatusViewerFor(kind schema.GroupKind, c kubernetes.Interface) (StatusViewer, error) { +func StatusViewerFor(kind schema.GroupKind) (StatusViewer, error) { switch kind { - case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), apps.Kind("Deployment"): - return &DeploymentStatusViewer{c.AppsV1()}, nil - case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), apps.Kind("DaemonSet"): - return &DaemonSetStatusViewer{c.AppsV1()}, nil - case apps.Kind("StatefulSet"): - return &StatefulSetStatusViewer{c.AppsV1()}, nil + case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(): + return &DeploymentStatusViewer{}, nil + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(): + return &DaemonSetStatusViewer{}, nil + case appsv1.SchemeGroupVersion.WithKind("StatefulSet").GroupKind(): + return &StatefulSetStatusViewer{}, nil } return nil, fmt.Errorf("no status viewer has been implemented for %v", kind) } // DeploymentStatusViewer implements the StatusViewer interface. -type DeploymentStatusViewer struct { - c clientappsv1.DeploymentsGetter -} +type DeploymentStatusViewer struct{} // DaemonSetStatusViewer implements the StatusViewer interface. -type DaemonSetStatusViewer struct { - c clientappsv1.DaemonSetsGetter -} +type DaemonSetStatusViewer struct{} // StatefulSetStatusViewer implements the StatusViewer interface. -type StatefulSetStatusViewer struct { - c clientappsv1.StatefulSetsGetter -} +type StatefulSetStatusViewer struct{} // Status returns a message describing deployment status, and a bool value indicating if the status is considered done. 
func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) { @@ -72,7 +65,7 @@ func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64 } if revision > 0 { - deploymentRev, err := util.Revision(deployment) + deploymentRev, err := deploymentutil.Revision(deployment) if err != nil { return "", false, fmt.Errorf("cannot get the revision of deployment %q: %v", deployment.Name, err) } @@ -81,8 +74,8 @@ func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64 } } if deployment.Generation <= deployment.Status.ObservedGeneration { - cond := util.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) - if cond != nil && cond.Reason == util.TimedOutReason { + cond := deploymentutil.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) + if cond != nil && cond.Reason == deploymentutil.TimedOutReason { return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", deployment.Name) } if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas { @@ -141,7 +134,7 @@ func (s *StatefulSetStatusViewer) Status(obj runtime.Unstructured, revision int6 if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas { return fmt.Sprintf("Waiting for %d pods to be ready...\n", *sts.Spec.Replicas-sts.Status.ReadyReplicas), false, nil } - if sts.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil { + if sts.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil { if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil { if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) { return fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go index f1d46b7a110b2..54f96fd2c3407 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go @@ -175,7 +175,7 @@ func scaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupRe // or returns error when timeout happens func WaitForScaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, newSize uint, waitForReplicas *RetryParams) error { if waitForReplicas == nil { - return fmt.Errorf("waitForReplicas parameter cannot be nil!") + return fmt.Errorf("waitForReplicas parameter cannot be nil") } err := wait.PollImmediate( waitForReplicas.Interval, diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go index 880b115b178ab..a7b0833f6d14c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go @@ -30,6 +30,9 @@ var Scheme = runtime.NewScheme() // Codecs provides access to encoding and decoding for the scheme var Codecs = serializer.NewCodecFactory(Scheme) +// ParameterCodec handles versioning of objects that are converted to query parameters. 
+var ParameterCodec = runtime.NewParameterCodec(Scheme) + // DefaultJSONEncoder returns a default encoder for our scheme func DefaultJSONEncoder() runtime.Encoder { return unstructured.JSONFallbackEncoder{Encoder: Codecs.LegacyCodec(Scheme.PrioritizedVersionsAllGroups()...)} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel index 2587f5fb10dd5..05f2cb612748c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "pod_port.go", "service_port.go", "umask.go", "umask_windows.go", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/BUILD.bazel new file mode 100644 index 0000000000000..26de2dc908771 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["certificate.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/certificate", + visibility = ["//visibility:public"], + deps = ["//vendor/k8s.io/api/certificates/v1beta1:go_default_library"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/certificate.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/certificate.go new file mode 100644 index 0000000000000..201958c6fca74 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/certificate.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificate + +import ( + "crypto/x509" + "encoding/pem" + "errors" + + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" +) + +// TODO(yue9944882): Remove this helper package once it's copied to k/api + +// ParseCSR extracts the CSR from the API object and decodes it. 
+func ParseCSR(obj *certificatesv1beta1.CertificateSigningRequest) (*x509.CertificateRequest, error) { + // extract PEM from request object + pemBytes := obj.Spec.Request + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/BUILD.bazel similarity index 54% rename from vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD.bazel rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/BUILD.bazel index eaa13261349fc..b3b3bcc1a6e59 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/BUILD.bazel @@ -2,24 +2,18 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["deployment_util.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/controller/deployment/util", - importpath = "k8s.io/kubernetes/pkg/controller/deployment/util", + srcs = ["deployment.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/deployment", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", - "//vendor/k8s.io/client-go/util/integer:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/controller:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/util/labels:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/deployment.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/deployment.go new file mode 100644 index 0000000000000..72f99f7f2ee45 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/deployment.go @@ -0,0 +1,230 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package deployment + +import ( + "sort" + "strconv" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + intstrutil "k8s.io/apimachinery/pkg/util/intstr" + appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" +) + +const ( + // RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence + RevisionAnnotation = "deployment.kubernetes.io/revision" + // RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment. + RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history" + // DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation + // in its replica sets. Helps in separating scaling events from the rollout process and for + // determining if the new replica set for a deployment is really saturated. + DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas" + // MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which + // is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their + // proportions in case the deployment has surge replicas. + MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas" + // RollbackRevisionNotFound is not found rollback event reason + RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound" + // RollbackTemplateUnchanged is the template unchanged rollback event reason + RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged" + // RollbackDone is the done rollback event reason + RollbackDone = "DeploymentRollback" + // TimedOutReason is added in a deployment when its newest replica set fails to show any progress + // within the given deadline (progressDeadlineSeconds). + TimedOutReason = "ProgressDeadlineExceeded" +) + +// GetDeploymentCondition returns the condition with the provided type. +func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Type == condType { + return &c + } + } + return nil +} + +// Revision returns the revision number of the input object. +func Revision(obj runtime.Object) (int64, error) { + acc, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + v, ok := acc.GetAnnotations()[RevisionAnnotation] + if !ok { + return 0, nil + } + return strconv.ParseInt(v, 10, 64) +} + +// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and +// ReplicaSetList from client interface. Note that the first set of old replica sets doesn't include the ones +// with no pods, and the second set of old replica sets include all old replica sets. The third returned value +// is the new replica set, and it may be nil if it doesn't exist yet. 
+func GetAllReplicaSets(deployment *appsv1.Deployment, c appsclient.AppsV1Interface) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet, *appsv1.ReplicaSet, error) { + rsList, err := listReplicaSets(deployment, rsListFromClient(c)) + if err != nil { + return nil, nil, nil, err + } + oldRSes, allOldRSes := findOldReplicaSets(deployment, rsList) + newRS := findNewReplicaSet(deployment, rsList) + return oldRSes, allOldRSes, newRS, nil +} + +// RsListFromClient returns an rsListFunc that wraps the given client. +func rsListFromClient(c appsclient.AppsV1Interface) rsListFunc { + return func(namespace string, options metav1.ListOptions) ([]*appsv1.ReplicaSet, error) { + rsList, err := c.ReplicaSets(namespace).List(options) + if err != nil { + return nil, err + } + var ret []*appsv1.ReplicaSet + for i := range rsList.Items { + ret = append(ret, &rsList.Items[i]) + } + return ret, err + } +} + +// TODO: switch this to full namespacers +type rsListFunc func(string, metav1.ListOptions) ([]*appsv1.ReplicaSet, error) +type podListFunc func(string, metav1.ListOptions) (*corev1.PodList, error) + +// listReplicaSets returns a slice of RSes the given deployment targets. +// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), +// because only the controller itself should do that. +// However, it does filter out anything whose ControllerRef doesn't match. +func listReplicaSets(deployment *appsv1.Deployment, getRSList rsListFunc) ([]*appsv1.ReplicaSet, error) { + // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector + // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830. + namespace := deployment.Namespace + selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) + if err != nil { + return nil, err + } + options := metav1.ListOptions{LabelSelector: selector.String()} + all, err := getRSList(namespace, options) + if err != nil { + return nil, err + } + // Only include those whose ControllerRef matches the Deployment. + owned := make([]*appsv1.ReplicaSet, 0, len(all)) + for _, rs := range all { + if metav1.IsControlledBy(rs, deployment) { + owned = append(owned, rs) + } + } + return owned, nil +} + +// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] +// We ignore pod-template-hash because: +// 1. The hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// 2. The deployment template won't have hash labels +func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool { + t1Copy := template1.DeepCopy() + t2Copy := template2.DeepCopy() + // Remove hash labels from template.Labels before comparing + delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) +} + +// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). 
+func findNewReplicaSet(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) *appsv1.ReplicaSet { + sort.Sort(replicaSetsByCreationTimestamp(rsList)) + for i := range rsList { + if equalIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) { + // In rare cases, such as after cluster upgrades, Deployment may end up with + // having more than one new ReplicaSets that have the same template as its template, + // see https://github.com/kubernetes/kubernetes/issues/40415 + // We deterministically choose the oldest new ReplicaSet. + return rsList[i] + } + } + // new ReplicaSet does not exist. + return nil +} + +// replicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker. +type replicaSetsByCreationTimestamp []*appsv1.ReplicaSet + +func (o replicaSetsByCreationTimestamp) Len() int { return len(o) } +func (o replicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o replicaSetsByCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) +} + +// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes. +// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. +func findOldReplicaSets(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet) { + var requiredRSs []*appsv1.ReplicaSet + var allRSs []*appsv1.ReplicaSet + newRS := findNewReplicaSet(deployment, rsList) + for _, rs := range rsList { + // Filter out new replica set + if newRS != nil && rs.UID == newRS.UID { + continue + } + allRSs = append(allRSs, rs) + if *(rs.Spec.Replicas) != 0 { + requiredRSs = append(requiredRSs, rs) + } + } + return requiredRSs, allRSs +} + +// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one +// step. For example: +// +// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) +// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) +// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) +// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true) + if err != nil { + return 0, 0, err + } + unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false) + if err != nil { + return 0, 0, err + } + + if surge == 0 && unavailable == 0 { + // Validation should never allow the user to explicitly use zero values for both maxSurge + // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. + // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the + // theory that surge might not work due to quota.
+ unavailable = 1 + } + + return int32(surge), int32(unavailable), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/BUILD.bazel new file mode 100644 index 0000000000000..d35758f3c742c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["sorted_event_list.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/event", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/event", + visibility = ["//visibility:public"], + deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/sorted_event_list.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/event/sorted_event_list.go index 9976c10ce734a..9967f953e68f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/sorted_event_list.go @@ -14,14 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package events +package event import ( - api "k8s.io/kubernetes/pkg/apis/core" + corev1 "k8s.io/api/core/v1" ) // SortableEvents implements sort.Interface for []api.Event based on the Timestamp field -type SortableEvents []api.Event +type SortableEvents []corev1.Event func (list SortableEvents) Len() int { return len(list) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/BUILD.bazel new file mode 100644 index 0000000000000..8b1a9ace34bac --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["fieldpath.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/fieldpath", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/fieldpath.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/fieldpath.go new file mode 100644 index 0000000000000..efd08cde0f530 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/fieldpath.go @@ -0,0 +1,111 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fieldpath + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" +) + +// TODO(yue9944882): Remove this helper package once it's copied to k/apimachinery + +// FormatMap formats map[string]string to a string. +func FormatMap(m map[string]string) (fmtStr string) { + // output with keys in sorted order to provide stable output + keys := sets.NewString() + for key := range m { + keys.Insert(key) + } + for _, key := range keys.List() { + fmtStr += fmt.Sprintf("%v=%q\n", key, m[key]) + } + fmtStr = strings.TrimSuffix(fmtStr, "\n") + + return +} + +// ExtractFieldPathAsString extracts the field from the given object +// and returns it as a string. The object must be a pointer to an +// API type. +func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", nil + } + + if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok { + switch path { + case "metadata.annotations": + if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetAnnotations()[subscript], nil + case "metadata.labels": + if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetLabels()[subscript], nil + default: + return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) + } + } + + switch fieldPath { + case "metadata.annotations": + return FormatMap(accessor.GetAnnotations()), nil + case "metadata.labels": + return FormatMap(accessor.GetLabels()), nil + case "metadata.name": + return accessor.GetName(), nil + case "metadata.namespace": + return accessor.GetNamespace(), nil + case "metadata.uid": + return string(accessor.GetUID()), nil + } + + return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) +} + +// SplitMaybeSubscriptedPath checks whether the specified fieldPath is +// subscripted, and +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). 
+// +// Example inputs and outputs: +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels['']" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) +func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/BUILD.bazel index 418f809327f03..e49afa71546b3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/BUILD.bazel @@ -8,7 +8,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/chai2010/gettext-go/gettext:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/generated:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/kubectl/generated:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/i18n.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/i18n.go index e287eab97de3b..a4ff9ac036ded 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/i18n.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/i18n.go @@ -24,10 +24,10 @@ import ( "os" "strings" - "k8s.io/kubernetes/pkg/generated" + "k8s.io/kubernetes/pkg/kubectl/generated" "github.com/chai2010/gettext-go/gettext" - "github.com/golang/glog" + "k8s.io/klog" ) var knownTranslations = map[string][]string{ @@ -50,14 +50,23 @@ var knownTranslations = map[string][]string{ } func loadSystemLanguage() string { - langStr := os.Getenv("LANG") + // Implements the following locale priority order: LC_ALL, LC_MESSAGES, LANG + // Similarly to: https://www.gnu.org/software/gettext/manual/html_node/Locale-Environment-Variables.html + langStr := os.Getenv("LC_ALL") if langStr == "" { - glog.V(3).Infof("Couldn't find the LANG environment variable, defaulting to en_US") + langStr = os.Getenv("LC_MESSAGES") + } + if langStr == "" { + langStr = os.Getenv("LANG") + } + + if langStr == "" { + klog.V(3).Infof("Couldn't find the LC_ALL, LC_MESSAGES or LANG environment variables, defaulting to en_US") return "default" } pieces := strings.Split(langStr, ".") if len(pieces) != 2 { - glog.V(3).Infof("Unexpected system language (%s), defaulting to en_US", langStr) + klog.V(3).Infof("Unexpected system language (%s), defaulting to en_US", langStr) return "default" } return pieces[0] @@ -74,7 +83,7 @@ func findLanguage(root string, getLanguageFn func() string) string { } } } - glog.V(3).Infof("Couldn't find translations for %s, using default", langStr) + klog.V(3).Infof("Couldn't find translations for %s, using default", langStr) return "default" } @@ -92,7 +101,7 @@ func LoadTranslations(root string, getLanguageFn func() string) error { fmt.Sprintf("%s/%s/LC_MESSAGES/k8s.mo", root, langStr), } - glog.V(3).Infof("Setting language to %s", langStr) + klog.V(3).Infof("Setting language to %s", langStr) // TODO: list the directory and load all files. 
buf := new(bytes.Buffer) w := zip.NewWriter(buf) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/pod_port.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/pod_port.go new file mode 100644 index 0000000000000..6d78501a89a96 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/pod_port.go @@ -0,0 +1,36 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + "k8s.io/api/core/v1" +) + +// LookupContainerPortNumberByName find containerPort number by its named port name +func LookupContainerPortNumberByName(pod v1.Pod, name string) (int32, error) { + for _, ctr := range pod.Spec.Containers { + for _, ctrportspec := range ctr.Ports { + if ctrportspec.Name == name { + return ctrportspec.ContainerPort, nil + } + } + } + + return int32(-1), fmt.Errorf("Pod '%s' does not have a named port '%s'", pod.Name, name) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/BUILD.bazel new file mode 100644 index 0000000000000..3d42d1b6caf6c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["podutils.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/podutils", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/client-go/util/integer:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/podutils.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/podutils.go new file mode 100644 index 0000000000000..2da66ef9c3f64 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/podutils.go @@ -0,0 +1,190 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podutils + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/integer" +) + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. 
LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *corev1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := getPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *corev1.Pod) bool { + return isPodReadyConditionTrue(pod.Status) +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. +func isPodReadyConditionTrue(status corev1.PodStatus) bool { + condition := getPodReadyCondition(status) + return condition != nil && condition.Status == corev1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func getPodReadyCondition(status corev1.PodStatus) *corev1.PodCondition { + _, condition := getPodCondition(&status, corev1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func getPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { + if status == nil { + return -1, nil + } + return getPodConditionFromList(status.Conditions, conditionType) +} + +// GetPodConditionFromList extracts the provided condition from the given list of condition and +// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. +func getPodConditionFromList(conditions []corev1.PodCondition, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { + if conditions == nil { + return -1, nil + } + for i := range conditions { + if conditions[i].Type == conditionType { + return i, &conditions[i] + } + } + return -1, nil +} + +// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs. +type ByLogging []*corev1.Pod + +func (s ByLogging) Len() int { return len(s) } +func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ByLogging) Less(i, j int) bool { + // 1. assigned < unassigned + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) > 0 + } + // 2. PodRunning < PodUnknown < PodPending + m := map[corev1.PodPhase]int{corev1.PodRunning: 0, corev1.PodUnknown: 1, corev1.PodPending: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. ready < not ready + if IsPodReady(s[i]) != IsPodReady(s[j]) { + return IsPodReady(s[i]) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 4. Been ready for more time < less time < empty time + if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) + } + // 5. 
Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 6. older pods < newer pods < empty timestamp pods + if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { + return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp) + } + return false +} + +// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete. +type ActivePods []*corev1.Pod + +func (s ActivePods) Len() int { return len(s) } +func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ActivePods) Less(i, j int) bool { + // 1. Unassigned < assigned + // If only one of the pods is unassigned, the unassigned one is smaller + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) == 0 + } + // 2. PodPending < PodUnknown < PodRunning + m := map[corev1.PodPhase]int{corev1.PodPending: 0, corev1.PodUnknown: 1, corev1.PodRunning: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. Not ready < ready + // If only one of the pods is not ready, the not ready one is smaller + if IsPodReady(s[i]) != IsPodReady(s[j]) { + return !IsPodReady(s[i]) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 4. Been ready for empty time < less time < more time + // If both pods are ready, the latest ready one is smaller + if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) + } + // 5. Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 6. Empty creation time pods < newer pods < older pods + if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { + return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp) + } + return false +} + +// afterOrZero checks if time t1 is after time t2; if one of them +// is zero, the zero time is seen as after non-zero time. 
+func afterOrZero(t1, t2 *metav1.Time) bool { + if t1.Time.IsZero() || t2.Time.IsZero() { + return t1.Time.IsZero() + } + return t1.After(t2.Time) +} + +func podReadyTime(pod *corev1.Pod) *metav1.Time { + if IsPodReady(pod) { + for _, c := range pod.Status.Conditions { + // we only care about pod ready conditions + if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { + return &c.LastTransitionTime + } + } + } + return &metav1.Time{} +} + +func maxContainerRestarts(pod *corev1.Pod) int { + maxRestarts := 0 + for _, c := range pod.Status.ContainerStatuses { + maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) + } + return maxRestarts +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/printers/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/printers/BUILD.bazel new file mode 100644 index 0000000000000..eef569cb49cb2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/printers/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["tabwriter.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/printers", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/printers", + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/printers/tabwriter.go similarity index 52% rename from vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/defaults.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/printers/tabwriter.go index 8949bb87cd716..aadb888b7eff5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/printers/tabwriter.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package printers import ( - "k8s.io/apimachinery/pkg/runtime" + "io" + "text/tabwriter" ) -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) +const ( + tabwriterMinWidth = 6 + tabwriterWidth = 4 + tabwriterPadding = 3 + tabwriterPadChar = ' ' + tabwriterFlags = 0 +) + +// GetNewTabWriter returns a tabwriter that translates tabbed columns in input into properly aligned text. 
+func GetNewTabWriter(output io.Writer) *tabwriter.Writer { + return tabwriter.NewWriter(output, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/BUILD.bazel similarity index 60% rename from vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/BUILD.bazel rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/BUILD.bazel index 38d7dd9924696..a85e6abd1e223 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/BUILD.bazel @@ -3,12 +3,12 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["qos.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos", - importpath = "k8s.io/kubernetes/pkg/apis/core/helper/qos", + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/qos", visibility = ["//visibility:public"], deps = [ + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/qos.go similarity index 78% rename from vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/qos.go index fad6fb2407451..73b25acac2d68 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/qos.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ -// NOTE: DO NOT use those helper functions through client-go, the -// package path will be changed in the future. package qos import ( + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/apis/core" ) -var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) +var supportedQoSComputeResources = sets.NewString(string(corev1.ResourceCPU), string(corev1.ResourceMemory)) -func isSupportedQoSComputeResource(name core.ResourceName) bool { +func isSupportedQoSComputeResource(name corev1.ResourceName) bool { return supportedQoSComputeResources.Has(string(name)) } @@ -34,9 +32,9 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool { // A pod is besteffort if none of its containers have specified any requests or limits. // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. // A pod is burstable if limits and requests do not match across all containers. 
-func GetPodQOS(pod *core.Pod) core.PodQOSClass { - requests := core.ResourceList{} - limits := core.ResourceList{} +func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass { + requests := corev1.ResourceList{} + limits := corev1.ResourceList{} zeroQuantity := resource.MustParse("0") isGuaranteed := true for _, container := range pod.Spec.Containers { @@ -73,12 +71,12 @@ func GetPodQOS(pod *core.Pod) core.PodQOSClass { } } - if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) { + if !qosLimitsFound.HasAll(string(corev1.ResourceMemory), string(corev1.ResourceCPU)) { isGuaranteed = false } } if len(requests) == 0 && len(limits) == 0 { - return core.PodQOSBestEffort + return corev1.PodQOSBestEffort } // Check is requests match limits for all resources. if isGuaranteed { @@ -91,7 +89,7 @@ func GetPodQOS(pod *core.Pod) core.PodQOSClass { } if isGuaranteed && len(requests) == len(limits) { - return core.PodQOSGuaranteed + return corev1.PodQOSGuaranteed } - return core.PodQOSBurstable + return corev1.PodQOSBurstable } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/BUILD.bazel new file mode 100644 index 0000000000000..031fff4788165 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["rbac.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/rbac", + visibility = ["//visibility:public"], + deps = ["//vendor/k8s.io/api/rbac/v1:go_default_library"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/rbac.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/rbac.go new file mode 100644 index 0000000000000..a149a51afb080 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/rbac.go @@ -0,0 +1,128 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "reflect" + "strings" +) + +type simpleResource struct { + Group string + Resource string + ResourceNameExist bool + ResourceName string +} + +// CompactRules combines rules that contain a single APIGroup/Resource, differ only by verb, and contain no other attributes. +// this is a fast check, and works well with the decomposed "missing rules" list from a Covers check. +func CompactRules(rules []rbacv1.PolicyRule) ([]rbacv1.PolicyRule, error) { + compacted := make([]rbacv1.PolicyRule, 0, len(rules)) + + simpleRules := map[simpleResource]*rbacv1.PolicyRule{} + for _, rule := range rules { + if resource, isSimple := isSimpleResourceRule(&rule); isSimple { + if existingRule, ok := simpleRules[resource]; ok { + // Add the new verbs to the existing simple resource rule + if existingRule.Verbs == nil { + existingRule.Verbs = []string{} + } + existingRule.Verbs = append(existingRule.Verbs, rule.Verbs...) 
+ } else { + // Copy the rule to accumulate matching simple resource rules into + simpleRules[resource] = rule.DeepCopy() + } + } else { + compacted = append(compacted, rule) + } + } + + // Once we've consolidated the simple resource rules, add them to the compacted list + for _, simpleRule := range simpleRules { + compacted = append(compacted, *simpleRule) + } + + return compacted, nil +} + +// isSimpleResourceRule returns true if the given rule contains verbs, a single resource, a single API group, at most one Resource Name, and no other values +func isSimpleResourceRule(rule *rbacv1.PolicyRule) (simpleResource, bool) { + resource := simpleResource{} + + // If we have "complex" rule attributes, return early without allocations or expensive comparisons + if len(rule.ResourceNames) > 1 || len(rule.NonResourceURLs) > 0 { + return resource, false + } + // If we have multiple api groups or resources, return early + if len(rule.APIGroups) != 1 || len(rule.Resources) != 1 { + return resource, false + } + + // Test if this rule only contains APIGroups/Resources/Verbs/ResourceNames + simpleRule := &rbacv1.PolicyRule{APIGroups: rule.APIGroups, Resources: rule.Resources, Verbs: rule.Verbs, ResourceNames: rule.ResourceNames} + if !reflect.DeepEqual(simpleRule, rule) { + return resource, false + } + + if len(rule.ResourceNames) == 0 { + resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: false} + } else { + resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: true, ResourceName: rule.ResourceNames[0]} + } + + return resource, true +} + +// BreakdownRule takes a rule and builds an equivalent list of rules that each have at most one verb, one +// resource, and one resource name +func BreakdownRule(rule rbacv1.PolicyRule) []rbacv1.PolicyRule { + subrules := []rbacv1.PolicyRule{} + for _, group := range rule.APIGroups { + for _, resource := range rule.Resources { + for _, verb := range rule.Verbs { + if len(rule.ResourceNames) > 0 { + for _, resourceName := range rule.ResourceNames { + subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}}) + } + + } else { + subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}}) + } + + } + } + } + + // Non-resource URLs are unique because they only combine with verbs. 
+ for _, nonResourceURL := range rule.NonResourceURLs { + for _, verb := range rule.Verbs { + subrules = append(subrules, rbacv1.PolicyRule{NonResourceURLs: []string{nonResourceURL}, Verbs: []string{verb}}) + } + } + + return subrules +} + +// SortableRuleSlice is used to sort rule slice +type SortableRuleSlice []rbacv1.PolicyRule + +func (s SortableRuleSlice) Len() int { return len(s) } +func (s SortableRuleSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s SortableRuleSlice) Less(i, j int) bool { + return strings.Compare(s[i].String(), s[j].String()) < 0 +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/resource/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/BUILD.bazel similarity index 54% rename from vendor/k8s.io/kubernetes/pkg/api/v1/resource/BUILD.bazel rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/BUILD.bazel index fb63eae142a24..edceac6eebe7a 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/resource/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/BUILD.bazel @@ -2,12 +2,13 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["helpers.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/api/v1/resource", - importpath = "k8s.io/kubernetes/pkg/api/v1/resource", + srcs = ["resource.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/resource", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/resource.go similarity index 75% rename from vendor/k8s.io/kubernetes/pkg/api/resource/helpers.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/resource.go index 4daf3e925433c..0a6f177a64710 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/resource.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,13 +20,31 @@ import ( "fmt" "math" "strconv" + "strings" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/apimachinery/pkg/util/sets" ) +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. 
+func PodRequestsAndLimits(pod *corev1.Pod) (reqs, limits corev1.ResourceList) { + reqs, limits = corev1.ResourceList{}, corev1.ResourceList{} + for _, container := range pod.Spec.Containers { + addResourceList(reqs, container.Resources.Requests) + addResourceList(limits, container.Resources.Limits) + } + // init containers define the minimum of any resource + for _, container := range pod.Spec.InitContainers { + maxResourceList(reqs, container.Resources.Requests) + maxResourceList(limits, container.Resources.Limits) + } + return +} + // addResourceList adds the resources in newList to list -func addResourceList(list, new api.ResourceList) { +func addResourceList(list, new corev1.ResourceList) { for name, quantity := range new { if value, ok := list[name]; !ok { list[name] = *quantity.Copy() @@ -39,7 +57,7 @@ func addResourceList(list, new api.ResourceList) { // maxResourceList sets list to the greater of list/newList for every resource // either list -func maxResourceList(list, new api.ResourceList) { +func maxResourceList(list, new corev1.ResourceList) { for name, quantity := range new { if value, ok := list[name]; !ok { list[name] = *quantity.Copy() @@ -52,25 +70,9 @@ func maxResourceList(list, new api.ResourceList) { } } -// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all -// containers of the pod. -func PodRequestsAndLimits(pod *api.Pod) (reqs api.ResourceList, limits api.ResourceList) { - reqs, limits = api.ResourceList{}, api.ResourceList{} - for _, container := range pod.Spec.Containers { - addResourceList(reqs, container.Resources.Requests) - addResourceList(limits, container.Resources.Limits) - } - // init containers define the minimum of any resource - for _, container := range pod.Spec.InitContainers { - maxResourceList(reqs, container.Resources.Requests) - maxResourceList(limits, container.Resources.Limits) - } - return -} - // ExtractContainerResourceValue extracts the value of a resource // in an already known container -func ExtractContainerResourceValue(fs *api.ResourceFieldSelector, container *api.Container) (string, error) { +func ExtractContainerResourceValue(fs *corev1.ResourceFieldSelector, container *corev1.Container) (string, error) { divisor := resource.Quantity{} if divisor.Cmp(fs.Divisor) == 0 { divisor = resource.MustParse("1") @@ -93,7 +95,7 @@ func ExtractContainerResourceValue(fs *api.ResourceFieldSelector, container *api return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor) } - return "", fmt.Errorf("unsupported container resource : %v", fs.Resource) + return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) } // convertResourceCPUToString converts cpu value to the format of divisor and returns @@ -116,3 +118,21 @@ func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value()))) return strconv.FormatInt(m, 10), nil } + +var standardContainerResources = sets.NewString( + string(corev1.ResourceCPU), + string(corev1.ResourceMemory), + string(corev1.ResourceEphemeralStorage), +) + +// IsStandardContainerResourceName returns true if the container can make a resource request +// for the specified resource +func IsStandardContainerResourceName(str string) bool { + return standardContainerResources.Has(str) || IsHugePageResourceName(corev1.ResourceName(str)) +} + +// IsHugePageResourceName returns true if the resource name has the huge page +// resource prefix. 
+func IsHugePageResourceName(name corev1.ResourceName) bool { + return strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/service_port.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/service_port.go index 8c9caf91d8928..bc56ab7d6a909 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/service_port.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/service_port.go @@ -23,21 +23,8 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -// Lookup containerPort number by its named port name -func lookupContainerPortNumberByName(pod v1.Pod, name string) (int32, error) { - for _, ctr := range pod.Spec.Containers { - for _, ctrportspec := range ctr.Ports { - if ctrportspec.Name == name { - return ctrportspec.ContainerPort, nil - } - } - } - - return int32(-1), fmt.Errorf("Pod '%s' does not have a named port '%s'", pod.Name, name) -} - -// Lookup containerPort number from Service port number -// It implements the handling of resolving container named port, as well as ignoring targetPort when clusterIP=None +// LookupContainerPortNumberByServicePort implements +// the handling of resolving container named port, as well as ignoring targetPort when clusterIP=None // It returns an error when a named port can't find a match (with -1 returned), or when the service does not // declare such port (with the input port number returned). func LookupContainerPortNumberByServicePort(svc v1.Service, pod v1.Pod, port int32) (int32, error) { @@ -52,12 +39,21 @@ func LookupContainerPortNumberByServicePort(svc v1.Service, pod v1.Pod, port int if svcportspec.TargetPort.IntValue() == 0 { // targetPort is omitted, and the IntValue() would be zero return svcportspec.Port, nil - } else { - return int32(svcportspec.TargetPort.IntValue()), nil } - } else { - return lookupContainerPortNumberByName(pod, svcportspec.TargetPort.String()) + return int32(svcportspec.TargetPort.IntValue()), nil } + return LookupContainerPortNumberByName(pod, svcportspec.TargetPort.String()) } return port, fmt.Errorf("Service %s does not have a service port %d", svc.Name, port) } + +// LookupServicePortNumberByName finds a service port number by its named port name +func LookupServicePortNumberByName(svc v1.Service, name string) (int32, error) { + for _, svcportspec := range svc.Spec.Ports { + if svcportspec.Name == name { + return svcportspec.Port, nil + } + } + + return int32(-1), fmt.Errorf("Service '%s' does not have a named port '%s'", svc.Name, name) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go index 8130753c300f9..f997d5cb4059f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go @@ -20,5 +20,19 @@ import ( "sort" ) -// Sorts []int64 in increasing order +// SortInts64 sorts []int64 in increasing order func SortInts64(a []int64) { sort.Slice(a, func(i, j int) bool { return a[i] < a[j] }) } + +// ContainsString checks if a given slice of strings contains the provided string. +// If a modifier func is provided, it is called with the slice item before the comparison.
+func ContainsString(slice []string, s string, modifier func(s string) string) bool { + for _, item := range slice { + if item == s { + return true + } + if modifier != nil && modifier(item) == s { + return true + } + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/BUILD.bazel new file mode 100644 index 0000000000000..28b358b182eff --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["storage.go"], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/storage", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/storage.go new file mode 100644 index 0000000000000..c62cc4ea1270d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/storage.go @@ -0,0 +1,107 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" +) + +// TODO(yue9944882): Remove this helper package once it's copied to k/api + +// IsDefaultStorageClassAnnotation represents a StorageClass annotation that +// marks a class as the default StorageClass +const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class" + +// BetaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation. +const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" + +// IsDefaultAnnotationText returns a pretty Yes/No String if +// the annotation is set +func IsDefaultAnnotationText(obj metav1.ObjectMeta) string { + if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { + return "Yes" + } + if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { + return "Yes" + } + + return "No" } + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX.
+func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, v1.ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, v1.ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, v1.ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { + accessModes := []v1.PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/BUILD.bazel similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/BUILD.bazel rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/BUILD.bazel index 589fac728d5e1..9523ec7d467d8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/BUILD.bazel @@ -9,8 +9,8 @@ go_library( "templater.go", "templates.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates", - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/templates", + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates", + importpath = "k8s.io/kubernetes/pkg/kubectl/util/templates", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/MakeNowJust/heredoc:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/command_groups.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/command_groups.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/command_groups.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/command_groups.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/markdown.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/markdown.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/markdown.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/markdown.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/normalizers.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/normalizers.go similarity index 100% rename 
from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/normalizers.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/normalizers.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/templater.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/templater.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/templater.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/templater.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/templates.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/templates.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/templates.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/templates.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go index 93e14473c225a..67add4e9140a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go @@ -22,6 +22,7 @@ import ( "golang.org/x/sys/unix" ) +// Umask is a wrapper for `unix.Umask()` on non-Windows platforms func Umask(mask int) (old int, err error) { return unix.Umask(mask), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask_windows.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask_windows.go index 7a1ba15386f8a..5b4f54bb795b3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask_windows.go @@ -22,6 +22,7 @@ import ( "errors" ) +// Umask returns an error on Windows func Umask(mask int) (int, error) { return 0, errors.New("platform and architecture is not supported") } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go index 41427780c71d5..0c1973dfb8ed3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go @@ -41,6 +41,7 @@ func ParseRFC3339(s string, nowFn func() metav1.Time) (metav1.Time, error) { return metav1.Time{Time: t}, nil } +// HashObject returns the hash of a Object hash by a Codec func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { data, err := runtime.Encode(codec, obj) if err != nil { @@ -63,11 +64,11 @@ func ParseFileSource(source string) (keyName, filePath string, err error) { case numSeparators == 0: return path.Base(filepath.ToSlash(source)), source, nil case numSeparators == 1 && strings.HasPrefix(source, "="): - return "", "", fmt.Errorf("key name for file path %v missing.", strings.TrimPrefix(source, "=")) + return "", "", fmt.Errorf("key name for file path %v missing", strings.TrimPrefix(source, "=")) case numSeparators == 1 && strings.HasSuffix(source, "="): - return "", "", fmt.Errorf("file path for key name %v missing.", strings.TrimSuffix(source, "=")) + return "", "", fmt.Errorf("file path for key name %v missing", strings.TrimSuffix(source, "=")) case numSeparators > 1: - return "", "", errors.New("Key names or file paths cannot contain '='.") + return "", "", errors.New("Key names or file paths cannot contain '='") default: components := strings.Split(source, "=") return components[0], components[1], nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD.bazel index 1612368530375..0eaa01a5c8462 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD.bazel +++ 
b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD.bazel @@ -10,7 +10,9 @@ go_library( importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubelet/apis", importpath = "k8s.io/kubernetes/pkg/kubelet/apis", visibility = ["//visibility:public"], - deps = select({ + deps = [ + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ] + select({ "@io_bazel_rules_go//go/platform:windows": [ "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/kubernetes/pkg/features:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go index 5a0db552c82be..869952556dde7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go @@ -16,6 +16,12 @@ limitations under the License. package apis +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + const ( LabelHostname = "kubernetes.io/hostname" LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" @@ -26,8 +32,78 @@ const ( LabelOS = "beta.kubernetes.io/os" LabelArch = "beta.kubernetes.io/arch" + + // GA versions of the legacy beta labels. + // TODO: update kubelet and controllers to set both beta and GA labels, then export these constants + labelZoneFailureDomainGA = "failure-domain.kubernetes.io/zone" + labelZoneRegionGA = "failure-domain.kubernetes.io/region" + labelInstanceTypeGA = "kubernetes.io/instance-type" + labelOSGA = "kubernetes.io/os" + labelArchGA = "kubernetes.io/arch" + + // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) + LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io" + // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) + LabelNamespaceSuffixNode = "node.kubernetes.io" + + // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled + LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" ) // When the --failure-domains scheduler flag is not specified, // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. var DefaultFailureDomains string = LabelHostname + "," + LabelZoneFailureDomain + "," + LabelZoneRegion + +var kubeletLabels = sets.NewString( + LabelHostname, + LabelZoneFailureDomain, + LabelZoneRegion, + LabelInstanceType, + LabelOS, + LabelArch, + + labelZoneFailureDomainGA, + labelZoneRegionGA, + labelInstanceTypeGA, + labelOSGA, + labelArchGA, +) + +var kubeletLabelNamespaces = sets.NewString( + LabelNamespaceSuffixKubelet, + LabelNamespaceSuffixNode, +) + +// KubeletLabels returns the list of label keys kubelets are allowed to set on their own Node objects +func KubeletLabels() []string { + return kubeletLabels.List() +} + +// KubeletLabelNamespaces returns the list of label key namespaces kubelets are allowed to set on their own Node objects +func KubeletLabelNamespaces() []string { + return kubeletLabelNamespaces.List() +} + +// IsKubeletLabel returns true if the label key is one that kubelets are allowed to set on their own Node object. +// This checks if the key is in the KubeletLabels() list, or has a namespace in the KubeletLabelNamespaces() list. 
+func IsKubeletLabel(key string) bool { + if kubeletLabels.Has(key) { + return true + } + + namespace := getLabelNamespace(key) + for allowedNamespace := range kubeletLabelNamespaces { + if namespace == allowedNamespace || strings.HasSuffix(namespace, "."+allowedNamespace) { + return true + } + } + + return false +} + +func getLabelNamespace(key string) string { + if parts := strings.SplitN(key, "/", 2); len(parts) == 2 { + return parts[0] + } + return "" +} diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go b/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go index 19207a1012b95..23faba1d3ec5e 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go +++ b/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go @@ -23,9 +23,10 @@ const ( // KubeletPort is the default port for the kubelet server on each host machine. // May be overridden by a flag at startup. KubeletPort = 10250 - // SchedulerPort is the default port for the scheduler status server. + // InsecureSchedulerPort is the default port for the scheduler status server. // May be overridden by a flag at startup. - SchedulerPort = 10251 + // Deprecated: use the secure KubeSchedulerPort instead. + InsecureSchedulerPort = 10251 // InsecureKubeControllerManagerPort is the default port for the controller manager status server. // May be overridden by a flag at startup. // Deprecated: use the secure KubeControllerManagerPort instead. @@ -49,4 +50,8 @@ const ( // CloudControllerManagerPort is the default port for the cloud controller manager server. // This value may be overridden by a flag at startup. CloudControllerManagerPort = 10258 + + // KubeSchedulerPort is the default port for the scheduler status server. + // May be overridden by a flag at startup. + KubeSchedulerPort = 10259 ) diff --git a/vendor/k8s.io/kubernetes/pkg/printers/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/printers/BUILD.bazel index 5a32e17a9b97b..b4154db3811e0 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/printers/BUILD.bazel @@ -3,8 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "customcolumn.go", - "customcolumn_flags.go", "humanreadable.go", "interface.go", "tabwriter.go", @@ -13,7 +11,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/printers", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", @@ -22,9 +19,5 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - "//vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", - "//vendor/k8s.io/client-go/util/jsonpath:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/printers/interface.go b/vendor/k8s.io/kubernetes/pkg/printers/interface.go index f528de5caa617..6e14f61c5beb6 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/printers/interface.go @@ -17,7 +17,6 @@ limitations under the License. 
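An illustrative aside, not part of the vendored change itself: the well_known_labels.go hunk above adds IsKubeletLabel together with the KubeletLabels and KubeletLabelNamespaces accessors. A minimal usage sketch follows; the candidate label keys are invented for illustration, and the import path is simply the vendored package shown in the diff.

package main

import (
	"fmt"

	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

func main() {
	// Hypothetical label keys a kubelet might try to set on its own Node object.
	candidates := []string{
		"kubernetes.io/hostname",              // explicitly in the allowed set
		"feature.node.kubernetes.io/cpu-avx",  // allowed via the *.node.kubernetes.io namespace suffix
		"node-restriction.kubernetes.io/team", // reserved namespace, not self-settable
		"example.com/owner",                   // arbitrary namespace, not self-settable
	}
	for _, key := range candidates {
		fmt.Printf("%-40s kubelet-settable: %v\n", key, kubeletapis.IsKubeletLabel(key))
	}
}

Per the comments in the hunk, node-restriction.kubernetes.io is deliberately excluded from the allowed namespaces; the NodeRestriction admission plugin is what enforces that restriction server-side.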
package printers import ( - "fmt" "io" "k8s.io/apimachinery/pkg/runtime" @@ -58,36 +57,3 @@ type PrintOptions struct { // indicates if it is OK to ignore missing keys for rendering an output template. AllowMissingKeys bool } - -// Describer generates output for the named resource or an error -// if the output could not be generated. Implementers typically -// abstract the retrieval of the named object from a remote server. -type Describer interface { - Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) -} - -// DescriberSettings holds display configuration for each object -// describer to control what is printed. -type DescriberSettings struct { - ShowEvents bool -} - -// ObjectDescriber is an interface for displaying arbitrary objects with extra -// information. Use when an object is in hand (on disk, or already retrieved). -// Implementers may ignore the additional information passed on extra, or use it -// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer -// is found. -type ObjectDescriber interface { - DescribeObject(object interface{}, extra ...interface{}) (output string, err error) -} - -// ErrNoDescriber is a structured error indicating the provided object or objects -// cannot be described. -type ErrNoDescriber struct { - Types []string -} - -// Error implements the error interface. -func (e ErrNoDescriber) Error() string { - return fmt.Sprintf("no describer has been defined for %v", e.Types) -} diff --git a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD.bazel index a127d98e97e6e..526e67bb89911 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD.bazel @@ -3,16 +3,13 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "describe.go", + "import_known_versions.go", "printers.go", ], importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/printers/internalversion", importpath = "k8s.io/kubernetes/pkg/printers/internalversion", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/fatih/camelcase:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", @@ -21,52 +18,48 @@ go_library( "//vendor/k8s.io/api/coordination/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/api/rbac/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", + "//vendor/k8s.io/api/scheduling/v1beta1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/duration:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/client-go/dynamic:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/events:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/ref:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/api/resource:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/apps/install:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/authentication/install:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/authorization/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/batch/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/certificates:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/certificates/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/coordination:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/coordination/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core/helper:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/core/install:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/events/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/extensions/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/networking:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/policy:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/policy/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/rbac:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/rbac/v1:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/rbac/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/scheduling:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/scheduling/install:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/settings/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/storage:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/storage/install:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/storage/util:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/controller/deployment/util:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/fieldpath:go_default_library", "//vendor/k8s.io/kubernetes/pkg/printers:go_default_library", - 
"//vendor/k8s.io/kubernetes/pkg/registry/rbac/validation:go_default_library", "//vendor/k8s.io/kubernetes/pkg/util/node:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/util/slice:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/import_known_versions.go b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/import_known_versions.go new file mode 100644 index 0000000000000..154e6c76a6c3d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/import_known_versions.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +// These imports are the API groups the client will support. +// TODO: Remove these manual install once we don't need legacy scheme in get comman +import ( + _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/authentication/install" + _ "k8s.io/kubernetes/pkg/apis/authorization/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/certificates/install" + _ "k8s.io/kubernetes/pkg/apis/coordination/install" + _ "k8s.io/kubernetes/pkg/apis/core/install" + _ "k8s.io/kubernetes/pkg/apis/events/install" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" + _ "k8s.io/kubernetes/pkg/apis/scheduling/install" + _ "k8s.io/kubernetes/pkg/apis/settings/install" + _ "k8s.io/kubernetes/pkg/apis/storage/install" +) diff --git a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go index cb4a3e07e97c5..70dadf902cc5a 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go +++ b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go @@ -19,8 +19,8 @@ package internalversion import ( "bytes" "fmt" - "io" "net" + "sort" "strconv" "strings" "time" @@ -33,7 +33,9 @@ import ( coordinationv1beta1 "k8s.io/api/coordination/v1beta1" apiv1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -54,6 +56,7 @@ import ( "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/pkg/apis/storage" storageutil "k8s.io/kubernetes/pkg/apis/storage/util" "k8s.io/kubernetes/pkg/printers" @@ -83,6 +86,7 @@ func AddHandlers(h printers.PrintHandler) { {Name: "IP", Type: "string", Priority: 1, Description: apiv1.PodStatus{}.SwaggerDoc()["podIP"]}, {Name: "Node", Type: "string", Priority: 1, Description: apiv1.PodSpec{}.SwaggerDoc()["nodeName"]}, {Name: "Nominated Node", Type: "string", Priority: 1, 
Description: apiv1.PodStatus{}.SwaggerDoc()["nominatedNodeName"]}, + {Name: "Readiness Gates", Type: "string", Priority: 1, Description: apiv1.PodSpec{}.SwaggerDoc()["readinessGates"]}, } h.TableHandler(podColumnDefinitions, printPodList) h.TableHandler(podColumnDefinitions, printPod) @@ -199,8 +203,7 @@ func AddHandlers(h printers.PrintHandler) { statefulSetColumnDefinitions := []metav1beta1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Desired", Type: "string", Description: appsv1beta1.StatefulSetSpec{}.SwaggerDoc()["replicas"]}, - {Name: "Current", Type: "string", Description: appsv1beta1.StatefulSetStatus{}.SwaggerDoc()["replicas"]}, + {Name: "Ready", Type: "string", Description: "Number of the pod with ready state"}, {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, {Name: "Containers", Type: "string", Priority: 1, Description: "Names of each container in the template."}, {Name: "Images", Type: "string", Priority: 1, Description: "Images referenced by each container in the template."}, @@ -309,8 +312,7 @@ func AddHandlers(h printers.PrintHandler) { deploymentColumnDefinitions := []metav1beta1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Desired", Type: "string", Description: extensionsv1beta1.DeploymentSpec{}.SwaggerDoc()["replicas"]}, - {Name: "Current", Type: "string", Description: extensionsv1beta1.DeploymentStatus{}.SwaggerDoc()["replicas"]}, + {Name: "Ready", Type: "string", Description: "Number of the pod with ready state"}, {Name: "Up-to-date", Type: "string", Description: extensionsv1beta1.DeploymentStatus{}.SwaggerDoc()["updatedReplicas"]}, {Name: "Available", Type: "string", Description: extensionsv1beta1.DeploymentStatus{}.SwaggerDoc()["availableReplicas"]}, {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, @@ -343,14 +345,14 @@ func AddHandlers(h printers.PrintHandler) { podSecurityPolicyColumnDefinitions := []metav1beta1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Priv", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["privileged"]}, - {Name: "Caps", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["allowedCapabilities"]}, - {Name: "SELinux", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["seLinux"]}, - {Name: "RunAsUser", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["runAsUser"]}, - {Name: "FsGroup", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["fsGroup"]}, - {Name: "SupGroup", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["supplementalGroups"]}, - {Name: "ReadOnlyRootFs", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["readOnlyRootFilesystem"]}, - {Name: "Volumes", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["volumes"]}, + {Name: "Priv", Type: "string", Description: policyv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["privileged"]}, + {Name: "Caps", Type: "string", Description: policyv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["allowedCapabilities"]}, + {Name: "SELinux", Type: "string", Description: 
policyv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["seLinux"]}, + {Name: "RunAsUser", Type: "string", Description: policyv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["runAsUser"]}, + {Name: "FsGroup", Type: "string", Description: policyv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["fsGroup"]}, + {Name: "SupGroup", Type: "string", Description: policyv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["supplementalGroups"]}, + {Name: "ReadOnlyRootFs", Type: "string", Description: policyv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["readOnlyRootFilesystem"]}, + {Name: "Volumes", Type: "string", Description: policyv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["volumes"]}, } h.TableHandler(podSecurityPolicyColumnDefinitions, printPodSecurityPolicy) h.TableHandler(podSecurityPolicyColumnDefinitions, printPodSecurityPolicyList) @@ -428,6 +430,24 @@ func AddHandlers(h printers.PrintHandler) { h.TableHandler(controllerRevisionColumnDefinition, printControllerRevision) h.TableHandler(controllerRevisionColumnDefinition, printControllerRevisionList) + resorceQuotaColumnDefinitions := []metav1beta1.TableColumnDefinition{ + {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, + {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, + {Name: "Request", Type: "string", Description: "Request represents a minimum amount of cpu/memory that a container may consume."}, + {Name: "Limit", Type: "string", Description: "Limits control the maximum amount of cpu/memory that a container may use independent of contention on the node."}, + } + h.TableHandler(resorceQuotaColumnDefinitions, printResourceQuota) + h.TableHandler(resorceQuotaColumnDefinitions, printResourceQuotaList) + + priorityClassColumnDefinitions := []metav1beta1.TableColumnDefinition{ + {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, + {Name: "Value", Type: "integer", Description: schedulingv1beta1.PriorityClass{}.SwaggerDoc()["value"]}, + {Name: "Global-Default", Type: "boolean", Description: schedulingv1beta1.PriorityClass{}.SwaggerDoc()["globalDefault"]}, + {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, + } + h.TableHandler(priorityClassColumnDefinitions, printPriorityClass) + h.TableHandler(priorityClassColumnDefinitions, printPriorityClassList) + AddDefaultHandlers(h) } @@ -642,11 +662,11 @@ func printPod(pod *api.Pod, options printers.PrintOptions) ([]metav1beta1.TableR } row.Cells = append(row.Cells, pod.Name, fmt.Sprintf("%d/%d", readyContainers, totalContainers), reason, int64(restarts), translateTimestampSince(pod.CreationTimestamp)) - if options.Wide { nodeName := pod.Spec.NodeName nominatedNodeName := pod.Status.NominatedNodeName podIP := pod.Status.PodIP + if podIP == "" { podIP = "<none>" } @@ -656,7 +676,24 @@ func printPod(pod *api.Pod, options printers.PrintOptions) ([]metav1beta1.TableR if nominatedNodeName == "" { nominatedNodeName = "<none>" } - row.Cells = append(row.Cells, podIP, nodeName, nominatedNodeName) + + readinessGates := "<none>" + if len(pod.Spec.ReadinessGates) > 0 { + trueConditions := 0 + for _, readinessGate := range pod.Spec.ReadinessGates { + conditionType := readinessGate.ConditionType + for _, condition := range pod.Status.Conditions { + if condition.Type == conditionType { + if condition.Status == api.ConditionTrue { + trueConditions += 1 + } + break + } + } + } + readinessGates = fmt.Sprintf("%d/%d", trueConditions,
len(pod.Spec.ReadinessGates)) + } + row.Cells = append(row.Cells, podIP, nodeName, nominatedNodeName, readinessGates) } return []metav1beta1.TableRow{row}, nil @@ -748,7 +785,7 @@ func printReplicationControllerList(list *api.ReplicationControllerList, options return rows, nil } -func printReplicaSet(obj *extensions.ReplicaSet, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { +func printReplicaSet(obj *apps.ReplicaSet, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { row := metav1beta1.TableRow{ Object: runtime.RawExtension{Object: obj}, } @@ -765,7 +802,7 @@ func printReplicaSet(obj *extensions.ReplicaSet, options printers.PrintOptions) return []metav1beta1.TableRow{row}, nil } -func printReplicaSetList(list *extensions.ReplicaSetList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { +func printReplicaSetList(list *apps.ReplicaSetList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { rows := make([]metav1beta1.TableRow, 0, len(list.Items)) for i := range list.Items { r, err := printReplicaSet(&list.Items[i], options) @@ -1020,9 +1057,9 @@ func printStatefulSet(obj *apps.StatefulSet, options printers.PrintOptions) ([]m Object: runtime.RawExtension{Object: obj}, } desiredReplicas := obj.Spec.Replicas - currentReplicas := obj.Status.Replicas + readyReplicas := obj.Status.ReadyReplicas createTime := translateTimestampSince(obj.CreationTimestamp) - row.Cells = append(row.Cells, obj.Name, int64(desiredReplicas), int64(currentReplicas), createTime) + row.Cells = append(row.Cells, obj.Name, fmt.Sprintf("%d/%d", int64(readyReplicas), int64(desiredReplicas)), createTime) if options.Wide { names, images := layoutContainerCells(obj.Spec.Template.Spec.Containers) row.Cells = append(row.Cells, names, images) @@ -1042,7 +1079,7 @@ func printStatefulSetList(list *apps.StatefulSetList, options printers.PrintOpti return rows, nil } -func printDaemonSet(obj *extensions.DaemonSet, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { +func printDaemonSet(obj *apps.DaemonSet, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { row := metav1beta1.TableRow{ Object: runtime.RawExtension{Object: obj}, } @@ -1061,7 +1098,7 @@ func printDaemonSet(obj *extensions.DaemonSet, options printers.PrintOptions) ([ return []metav1beta1.TableRow{row}, nil } -func printDaemonSetList(list *extensions.DaemonSetList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { +func printDaemonSetList(list *apps.DaemonSetList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { rows := make([]metav1beta1.TableRow, 0, len(list.Items)) for i := range list.Items { r, err := printDaemonSet(&list.Items[i], options) @@ -1527,20 +1564,13 @@ func printComponentStatusList(list *api.ComponentStatusList, options printers.Pr return rows, nil } -func truncate(str string, maxLen int) string { - if len(str) > maxLen { - return str[0:maxLen] + "..." 
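For illustration only: the Readiness Gates wide-output cell added to printPod earlier in this hunk is a plain "true conditions / total gates" fraction. The sketch below mirrors that counting logic with stand-in types rather than the internal api.Pod structures, so it can be read and run on its own.

package main

import "fmt"

type condition struct {
	Type   string
	Status string // "True", "False" or "Unknown"
}

// readinessGateCell mirrors printPod: for each gate, find the pod condition of
// the same type and count it only when its status is "True".
func readinessGateCell(gates []string, conditions []condition) string {
	if len(gates) == 0 {
		return ""
	}
	trueConditions := 0
	for _, gate := range gates {
		for _, c := range conditions {
			if c.Type == gate {
				if c.Status == "True" {
					trueConditions++
				}
				break
			}
		}
	}
	return fmt.Sprintf("%d/%d", trueConditions, len(gates))
}

func main() {
	gates := []string{"example.com/feature-a", "example.com/feature-b"}
	conditions := []condition{{Type: "example.com/feature-a", Status: "True"}}
	fmt.Println(readinessGateCell(gates, conditions)) // prints 1/2
}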
- } - return str -} - -func printDeployment(obj *extensions.Deployment, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { +func printDeployment(obj *apps.Deployment, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { row := metav1beta1.TableRow{ Object: runtime.RawExtension{Object: obj}, } desiredReplicas := obj.Spec.Replicas - currentReplicas := obj.Status.Replicas updatedReplicas := obj.Status.UpdatedReplicas + readyReplicas := obj.Status.ReadyReplicas availableReplicas := obj.Status.AvailableReplicas age := translateTimestampSince(obj.CreationTimestamp) containers := obj.Spec.Template.Spec.Containers @@ -1549,7 +1579,7 @@ func printDeployment(obj *extensions.Deployment, options printers.PrintOptions) // this shouldn't happen if LabelSelector passed validation return nil, err } - row.Cells = append(row.Cells, obj.Name, int64(desiredReplicas), int64(currentReplicas), int64(updatedReplicas), int64(availableReplicas), age) + row.Cells = append(row.Cells, obj.Name, fmt.Sprintf("%d/%d", int64(readyReplicas), int64(desiredReplicas)), int64(updatedReplicas), int64(availableReplicas), age) if options.Wide { containers, images := layoutContainerCells(containers) row.Cells = append(row.Cells, containers, images, selector.String()) @@ -1557,7 +1587,7 @@ func printDeployment(obj *extensions.Deployment, options printers.PrintOptions) return []metav1beta1.TableRow{row}, nil } -func printDeploymentList(list *extensions.DeploymentList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { +func printDeploymentList(list *apps.DeploymentList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { rows := make([]metav1beta1.TableRow, 0, len(list.Items)) for i := range list.Items { r, err := printDeployment(&list.Items[i], options) @@ -1808,24 +1838,6 @@ func printStatus(obj *metav1.Status, options printers.PrintOptions) ([]metav1bet return []metav1beta1.TableRow{row}, nil } -// Lay out all the containers on one line if use wide output. -// DEPRECATED: convert to TableHandler and use layoutContainerCells -func layoutContainers(containers []api.Container, w io.Writer) error { - var namesBuffer bytes.Buffer - var imagesBuffer bytes.Buffer - - for i, container := range containers { - namesBuffer.WriteString(container.Name) - imagesBuffer.WriteString(container.Image) - if i != len(containers)-1 { - namesBuffer.WriteString(",") - imagesBuffer.WriteString(",") - } - } - _, err := fmt.Fprintf(w, "\t%s\t%s", namesBuffer.String(), imagesBuffer.String()) - return err -} - // Lay out all the containers on one line if use wide output. 
func layoutContainerCells(containers []api.Container) (names string, images string) { var namesBuffer bytes.Buffer @@ -1884,3 +1896,102 @@ func printControllerRevisionList(list *apps.ControllerRevisionList, options prin } return rows, nil } + +func printResourceQuota(resourceQuota *api.ResourceQuota, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { + row := metav1beta1.TableRow{ + Object: runtime.RawExtension{Object: resourceQuota}, + } + + resources := make([]api.ResourceName, 0, len(resourceQuota.Status.Hard)) + for resource := range resourceQuota.Status.Hard { + resources = append(resources, resource) + } + sort.Sort(SortableResourceNames(resources)) + + requestColumn := bytes.NewBuffer([]byte{}) + limitColumn := bytes.NewBuffer([]byte{}) + for i := range resources { + w := requestColumn + resource := resources[i] + usedQuantity := resourceQuota.Status.Used[resource] + hardQuantity := resourceQuota.Status.Hard[resource] + + // use limitColumn writer if a resource name prefixed with "limits" is found + if pieces := strings.Split(resource.String(), "."); len(pieces) > 1 && pieces[0] == "limits" { + w = limitColumn + } + + fmt.Fprintf(w, "%s: %s/%s, ", resource, usedQuantity.String(), hardQuantity.String()) + } + + age := translateTimestampSince(resourceQuota.CreationTimestamp) + row.Cells = append(row.Cells, resourceQuota.Name, age, strings.TrimSuffix(requestColumn.String(), ", "), strings.TrimSuffix(limitColumn.String(), ", ")) + return []metav1beta1.TableRow{row}, nil +} + +func printResourceQuotaList(list *api.ResourceQuotaList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { + rows := make([]metav1beta1.TableRow, 0, len(list.Items)) + for i := range list.Items { + r, err := printResourceQuota(&list.Items[i], options) + if err != nil { + return nil, err + } + rows = append(rows, r...) + } + return rows, nil +} + +func printPriorityClass(obj *scheduling.PriorityClass, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { + row := metav1beta1.TableRow{ + Object: runtime.RawExtension{Object: obj}, + } + + name := obj.Name + value := obj.Value + globalDefault := obj.GlobalDefault + row.Cells = append(row.Cells, name, int64(value), globalDefault, translateTimestampSince(obj.CreationTimestamp)) + + return []metav1beta1.TableRow{row}, nil +} + +func printPriorityClassList(list *scheduling.PriorityClassList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { + rows := make([]metav1beta1.TableRow, 0, len(list.Items)) + for i := range list.Items { + r, err := printPriorityClass(&list.Items[i], options) + if err != nil { + return nil, err + } + rows = append(rows, r...) 
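Another illustrative sketch: printResourceQuota above sorts the hard resource names and routes any name whose first dot-separated piece is "limits" to the Limit column, everything else to the Request column. The stand-alone version below uses plain strings for the used/hard quantities; the internal api.ResourceName and quantity types are deliberately left out.

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Hypothetical quota status, rendered as "used/hard" strings.
	hard := map[string]string{
		"requests.cpu":    "500m/2",
		"requests.memory": "1Gi/4Gi",
		"limits.cpu":      "1/4",
		"pods":            "3/10",
	}

	names := make([]string, 0, len(hard))
	for name := range hard {
		names = append(names, name)
	}
	sort.Strings(names) // the real code sorts api.ResourceName values the same way

	var requestCol, limitCol []string
	for _, name := range names {
		entry := fmt.Sprintf("%s: %s", name, hard[name])
		// Route "limits.<anything>" to the Limit column, everything else to Request.
		if pieces := strings.Split(name, "."); len(pieces) > 1 && pieces[0] == "limits" {
			limitCol = append(limitCol, entry)
			continue
		}
		requestCol = append(requestCol, entry)
	}

	fmt.Println("REQUEST:", strings.Join(requestCol, ", "))
	fmt.Println("LIMIT:  ", strings.Join(limitCol, ", "))
}

Using strings.Join here sidesteps the trailing-separator trimming that the vendored code does with strings.TrimSuffix.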
+ } + return rows, nil +} + +func printBoolPtr(value *bool) string { + if value != nil { + return printBool(*value) + } + + return "<none>" +} + +func printBool(value bool) string { + if value { + return "True" + } + + return "False" +} + +type SortableResourceNames []api.ResourceName + +func (list SortableResourceNames) Len() int { + return len(list) +} + +func (list SortableResourceNames) Swap(i, j int) { + list[i], list[j] = list[j], list[i] +} + +func (list SortableResourceNames) Less(i, j int) bool { + return list[i] < list[j] +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/BUILD.bazel index 8948bfa09ae0f..93a2288ddd80e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "clusterrole_interfaces.go", "clusterrolebinding_interfaces.go", + "namespace.go", "reconcile_role.go", "reconcile_rolebindings.go", "role_interfaces.go", @@ -22,6 +23,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", "//vendor/k8s.io/kubernetes/pkg/registry/rbac/validation:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/namespace.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/namespace.go new file mode 100644 index 0000000000000..2ee7fe9a2b0a1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/namespace.go @@ -0,0 +1,44 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciliation + +import ( + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// tryEnsureNamespace gets or creates the given namespace while ignoring forbidden errors. +// It is a best effort attempt as the user may not be able to get or create namespaces. +// This allows us to handle flows where the user can only mutate roles and role bindings.
+func tryEnsureNamespace(client corev1client.NamespaceInterface, namespace string) error { + _, getErr := client.Get(namespace, metav1.GetOptions{}) + if getErr == nil { + return nil + } + + if fatalGetErr := utilerrors.FilterOut(getErr, apierrors.IsNotFound, apierrors.IsForbidden); fatalGetErr != nil { + return fatalGetErr + } + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + _, createErr := client.Create(ns) + + return utilerrors.FilterOut(createErr, apierrors.IsAlreadyExists, apierrors.IsForbidden) +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/role_interfaces.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/role_interfaces.go index 24cb7899d3701..1c349d36e7d15 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/role_interfaces.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/role_interfaces.go @@ -17,9 +17,7 @@ limitations under the License. package reconciliation import ( - corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" @@ -90,8 +88,7 @@ func (c RoleModifier) Get(namespace, name string) (RuleOwner, error) { } func (c RoleModifier) Create(in RuleOwner) (RuleOwner, error) { - ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: in.GetNamespace()}} - if _, err := c.NamespaceClient.Create(ns); err != nil && !apierrors.IsAlreadyExists(err) { + if err := tryEnsureNamespace(c.NamespaceClient, in.GetNamespace()); err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go index 23bf6b653a88d..3d60537815e8a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go @@ -17,9 +17,7 @@ limitations under the License. 
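A small aside on error handling: tryEnsureNamespace above uses utilerrors.FilterOut so that NotFound and Forbidden from the Get, and AlreadyExists and Forbidden from the Create, are treated as non-fatal. The sketch below isolates just that filtering behaviour; the resource and error values are made up.

package main

import (
	"errors"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	nsResource := schema.GroupResource{Resource: "namespaces"}

	// A NotFound error matches one of the filters, so the caller sees nil and proceeds.
	notFound := apierrors.NewNotFound(nsResource, "demo")
	fmt.Println(utilerrors.FilterOut(notFound, apierrors.IsNotFound, apierrors.IsForbidden)) // <nil>

	// Anything else survives the filter and is reported as fatal.
	boom := errors.New("connection refused")
	fmt.Println(utilerrors.FilterOut(boom, apierrors.IsNotFound, apierrors.IsForbidden)) // connection refused
}

FilterOut returning nil when every error matches is what lets the reconciler keep going even when it is not allowed to read or create namespaces.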
package reconciliation import ( - corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -92,8 +90,7 @@ func (c RoleBindingClientAdapter) Get(namespace, name string) (RoleBinding, erro } func (c RoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) { - ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: in.GetNamespace()}} - if _, err := c.NamespaceClient.Create(ns); err != nil && !apierrors.IsAlreadyExists(err) { + if err := tryEnsureNamespace(c.NamespaceClient, in.GetNamespace()); err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD.bazel index 9a4f4d27ade0b..12eeca3c6c716 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD.bazel @@ -12,13 +12,13 @@ go_library( importpath = "k8s.io/kubernetes/pkg/registry/rbac/validation", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/rbac:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/rbac/v1:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/rule.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/rule.go index 833ffc1e6c1ed..6c88791f8bef7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/rule.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/rule.go @@ -22,7 +22,7 @@ import ( "fmt" "strings" - "github.com/golang/glog" + "k8s.io/klog" rbacv1 "k8s.io/api/rbac/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -61,7 +61,7 @@ func ConfirmNoEscalation(ctx context.Context, ruleResolver AuthorizationRuleReso ownerRules, err := ruleResolver.RulesFor(user, namespace) if err != nil { // As per AuthorizationRuleResolver contract, this may return a non fatal error with an incomplete list of policies. Log the error and continue. 
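A brief aside on the logging change: the only edit to rule.go in this hunk is swapping glog for klog on the verbosity-gated call shown just below. As a hedged sketch (the flag values and message arguments are arbitrary), a standalone binary would wire klog up roughly like this for V(1) output to appear:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on the default flag set
	_ = flag.Set("v", "1")
	_ = flag.Set("logtostderr", "true")
	flag.Parse()

	// Same call shape as the vendored code, with placeholder arguments.
	klog.V(1).Infof("non-fatal error getting local rules for %v: %v", "user", "example error")
	klog.Flush()
}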
- glog.V(1).Infof("non-fatal error getting local rules for %v: %v", user, err) + klog.V(1).Infof("non-fatal error getting local rules for %v: %v", user, err) ruleResolutionErrors = append(ruleResolutionErrors, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD.bazel deleted file mode 100644 index d2ac477c7e617..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "scheduler_interface.go", - "types.go", - "well_known_labels.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm", - importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/scheduler/api:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/scheduler/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/doc.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/doc.go deleted file mode 100644 index ac7b0038073bf..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package algorithm contains a generic Scheduler interface and several -// implementations. 
-// implementations.
-package algorithm // import "k8s.io/kubernetes/pkg/scheduler/algorithm" diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/BUILD.bazel deleted file mode 100644 index f74c63b64d9c9..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "non_zero.go", - "topologies.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", - importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/non_zero.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/non_zero.go deleted file mode 100644 index b671945f33871..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/non_zero.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import "k8s.io/api/core/v1" - -// For each of these resources, a pod that doesn't request the resource explicitly -// will be treated as having requested the amount indicated below, for the purpose -// of computing priority only. This ensures that when scheduling zero-request pods, such -// pods will not all be scheduled to the machine with the smallest in-use request, -// and that when scheduling regular pods, such pods will not see zero-request pods as -// consuming no resources whatsoever. We chose these values to be similar to the -// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary. -// As described in #11713, we use request instead of limit to deal with resource requirements. - -// DefaultMilliCPURequest defines default milli cpu request number. -const DefaultMilliCPURequest int64 = 100 // 0.1 core -// DefaultMemoryRequest defines default memory request size. -const DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB - -// GetNonzeroRequests returns the default resource request if none is found or -// what is provided on the request. 
-func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) { - var outMilliCPU, outMemory int64 - // Override if un-set, but not if explicitly set to zero - if _, found := (*requests)[v1.ResourceCPU]; !found { - outMilliCPU = DefaultMilliCPURequest - } else { - outMilliCPU = requests.Cpu().MilliValue() - } - // Override if un-set, but not if explicitly set to zero - if _, found := (*requests)[v1.ResourceMemory]; !found { - outMemory = DefaultMemoryRequest - } else { - outMemory = requests.Memory().Value() - } - return outMilliCPU, outMemory -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/topologies.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/topologies.go deleted file mode 100644 index bf5ee53ac01d2..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/topologies.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/sets" -) - -// GetNamespacesFromPodAffinityTerm returns a set of names -// according to the namespaces indicated in podAffinityTerm. -// If namespaces is empty it considers the given pod's namespace. -func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.String { - names := sets.String{} - if len(podAffinityTerm.Namespaces) == 0 { - names.Insert(pod.Namespace) - } else { - names.Insert(podAffinityTerm.Namespaces...) - } - return names -} - -// PodMatchesTermsNamespaceAndSelector returns true if the given -// matches the namespace and selector defined by `s . -func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, selector labels.Selector) bool { - if !namespaces.Has(pod.Namespace) { - return false - } - - if !selector.Matches(labels.Set(pod.Labels)) { - return false - } - return true -} - -// NodesHaveSameTopologyKey checks if nodeA and nodeB have same label value with given topologyKey as label key. -// Returns false if topologyKey is empty. -func NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool { - if len(topologyKey) == 0 { - return false - } - - if nodeA.Labels == nil || nodeB.Labels == nil { - return false - } - - nodeALabel, okA := nodeA.Labels[topologyKey] - nodeBLabel, okB := nodeB.Labels[topologyKey] - - // If found label in both nodes, check the label - if okB && okA { - return nodeALabel == nodeBLabel - } - - return false -} - -// Topologies contains topologies information of nodes. -type Topologies struct { - DefaultKeys []string -} - -// NodesHaveSameTopologyKey checks if nodeA and nodeB have same label value with given topologyKey as label key. 
-func (tps *Topologies) NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool { - return NodesHaveSameTopologyKey(nodeA, nodeB, topologyKey) -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go deleted file mode 100644 index ffa275d45f034..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package algorithm - -import ( - "k8s.io/api/core/v1" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" -) - -// SchedulerExtender is an interface for external processes to influence scheduling -// decisions made by Kubernetes. This is typically needed for resources not directly -// managed by Kubernetes. -type SchedulerExtender interface { - // Filter based on extender-implemented predicate functions. The filtered list is - // expected to be a subset of the supplied list. failedNodesMap optionally contains - // the list of failed nodes and failure reasons. - Filter(pod *v1.Pod, - nodes []*v1.Node, nodeNameToInfo map[string]*schedulercache.NodeInfo, - ) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error) - - // Prioritize based on extender-implemented priority functions. The returned scores & weight - // are used to compute the weighted score for an extender. The weighted scores are added to - // the scores computed by Kubernetes scheduler. The total scores are used to do the host selection. - Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error) - - // Bind delegates the action of binding a pod to a node to the extender. - Bind(binding *v1.Binding) error - - // IsBinder returns whether this extender is configured for the Bind method. - IsBinder() bool - - // IsInterested returns true if at least one extended resource requested by - // this pod is managed by this extender. - IsInterested(pod *v1.Pod) bool - - // ProcessPreemption returns nodes with their victim pods processed by extender based on - // given: - // 1. Pod to schedule - // 2. Candidate nodes and victim pods (nodeToVictims) generated by previous scheduling process. - // 3. nodeNameToInfo to restore v1.Node from node name if extender cache is enabled. - // The possible changes made by extender may include: - // 1. Subset of given candidate nodes after preemption phase of extender. - // 2. A different set of victim pod for every given candidate node after preemption phase of extender. - ProcessPreemption( - pod *v1.Pod, - nodeToVictims map[*v1.Node]*schedulerapi.Victims, - nodeNameToInfo map[string]*schedulercache.NodeInfo, - ) (map[*v1.Node]*schedulerapi.Victims, error) - - // SupportsPreemption returns if the scheduler extender support preemption or not. 
- SupportsPreemption() bool - - // IsIgnorable returns true indicates scheduling should not fail when this extender - // is unavailable. This gives scheduler ability to fail fast and tolerate non-critical extenders as well. - IsIgnorable() bool -} - -// ScheduleAlgorithm is an interface implemented by things that know how to schedule pods -// onto machines. -type ScheduleAlgorithm interface { - Schedule(*v1.Pod, NodeLister) (selectedMachine string, err error) - // Preempt receives scheduling errors for a pod and tries to create room for - // the pod by preempting lower priority pods if possible. - // It returns the node where preemption happened, a list of preempted pods, a - // list of pods whose nominated node name should be removed, and error if any. - Preempt(*v1.Pod, NodeLister, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error) - // Predicates() returns a pointer to a map of predicate functions. This is - // exposed for testing. - Predicates() map[string]FitPredicate - // Prioritizers returns a slice of priority config. This is exposed for - // testing. - Prioritizers() []PriorityConfig -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go deleted file mode 100644 index cd1535cb55880..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package algorithm - -import ( - apps "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" -) - -// NodeFieldSelectorKeys is a map that: the key are node field selector keys; the values are -// the functions to get the value of the node field. -var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{ - NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name }, -} - -// FitPredicate is a function that indicates if a pod fits into an existing node. -// The failure information is given by the error. -type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error) - -// PriorityMapFunction is a function that computes per-node results for a given node. -// TODO: Figure out the exact API of this method. -// TODO: Change interface{} to a specific type. -type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) - -// PriorityReduceFunction is a function that aggregated per-node results and computes -// final scores for all nodes. -// TODO: Figure out the exact API of this method. -// TODO: Change interface{} to a specific type. 
-type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error - -// PredicateMetadataProducer is a function that computes predicate metadata for a given pod. -type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) PredicateMetadata - -// PriorityMetadataProducer is a function that computes metadata for a given pod. This -// is now used for only for priority functions. For predicates please use PredicateMetadataProducer. -type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} - -// PriorityFunction is a function that computes scores for all nodes. -// DEPRECATED -// Use Map-Reduce pattern for priority functions. -type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) - -// PriorityConfig is a config used for a priority function. -type PriorityConfig struct { - Name string - Map PriorityMapFunction - Reduce PriorityReduceFunction - // TODO: Remove it after migrating all functions to - // Map-Reduce pattern. - Function PriorityFunction - Weight int -} - -// EmptyPredicateMetadataProducer returns a no-op MetadataProducer type. -func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) PredicateMetadata { - return nil -} - -// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type. -func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { - return nil -} - -// PredicateFailureReason interface represents the failure reason of a predicate. -type PredicateFailureReason interface { - GetReason() string -} - -// NodeLister interface represents anything that can list nodes for a scheduler. -type NodeLister interface { - // We explicitly return []*v1.Node, instead of v1.NodeList, to avoid - // performing expensive copies that are unneeded. - List() ([]*v1.Node, error) -} - -// PodLister interface represents anything that can list pods for a scheduler. -type PodLister interface { - // We explicitly return []*v1.Pod, instead of v1.PodList, to avoid - // performing expensive copies that are unneeded. - List(labels.Selector) ([]*v1.Pod, error) - // This is similar to "List()", but the returned slice does not - // contain pods that don't pass `podFilter`. - FilteredList(podFilter schedulercache.PodFilter, selector labels.Selector) ([]*v1.Pod, error) -} - -// ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler. -type ServiceLister interface { - // Lists all the services - List(labels.Selector) ([]*v1.Service, error) - // Gets the services for the given pod - GetPodServices(*v1.Pod) ([]*v1.Service, error) -} - -// ControllerLister interface represents anything that can produce a list of ReplicationController; the list is consumed by a scheduler. -type ControllerLister interface { - // Lists all the replication controllers - List(labels.Selector) ([]*v1.ReplicationController, error) - // Gets the services for the given pod - GetPodControllers(*v1.Pod) ([]*v1.ReplicationController, error) -} - -// ReplicaSetLister interface represents anything that can produce a list of ReplicaSet; the list is consumed by a scheduler. 
-type ReplicaSetLister interface { - // Gets the replicasets for the given pod - GetPodReplicaSets(*v1.Pod) ([]*apps.ReplicaSet, error) -} - -var _ ControllerLister = &EmptyControllerLister{} - -// EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data -type EmptyControllerLister struct{} - -// List returns nil -func (f EmptyControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) { - return nil, nil -} - -// GetPodControllers returns nil -func (f EmptyControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) { - return nil, nil -} - -var _ ReplicaSetLister = &EmptyReplicaSetLister{} - -// EmptyReplicaSetLister implements ReplicaSetLister on []extensions.ReplicaSet returning empty data -type EmptyReplicaSetLister struct{} - -// GetPodReplicaSets returns nil -func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*apps.ReplicaSet, err error) { - return nil, nil -} - -// StatefulSetLister interface represents anything that can produce a list of StatefulSet; the list is consumed by a scheduler. -type StatefulSetLister interface { - // Gets the StatefulSet for the given pod. - GetPodStatefulSets(*v1.Pod) ([]*apps.StatefulSet, error) -} - -var _ StatefulSetLister = &EmptyStatefulSetLister{} - -// EmptyStatefulSetLister implements StatefulSetLister on []apps.StatefulSet returning empty data. -type EmptyStatefulSetLister struct{} - -// GetPodStatefulSets of EmptyStatefulSetLister returns nil. -func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.StatefulSet, err error) { - return nil, nil -} - -// PredicateMetadata interface represents anything that can access a predicate metadata. -type PredicateMetadata interface { - ShallowCopy() PredicateMetadata - AddPod(addedPod *v1.Pod, nodeInfo *schedulercache.NodeInfo) error - RemovePod(deletedPod *v1.Pod) error -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD.bazel index e559d517ef9ea..c97e7d35560ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD.bazel @@ -6,6 +6,7 @@ go_library( "doc.go", "register.go", "types.go", + "well_known_labels.go", "zz_generated.deepcopy.go", ], importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/scheduler/api", @@ -18,5 +19,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/well_known_labels.go b/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/well_known_labels.go rename to vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go index 3482649b4160a..afe64dd50f7f1 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/well_known_labels.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package algorithm +package api import ( api "k8s.io/kubernetes/pkg/apis/core" diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/scheduler/cache/BUILD.bazel deleted file mode 100644 index b913492d54703..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cache.go", - "interface.go", - "node_info.go", - "node_tree.go", - "util.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/scheduler/cache", - importpath = "k8s.io/kubernetes/pkg/scheduler/cache", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/features:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/scheduler/util:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/util/node:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/cache.go b/vendor/k8s.io/kubernetes/pkg/scheduler/cache/cache.go deleted file mode 100644 index 5fc3980492202..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/cache.go +++ /dev/null @@ -1,609 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "fmt" - "sync" - "time" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/features" - - "github.com/golang/glog" - policy "k8s.io/api/policy/v1beta1" -) - -var ( - cleanAssumedPeriod = 1 * time.Second -) - -// New returns a Cache implementation. -// It automatically starts a go routine that manages expiration of assumed pods. -// "ttl" is how long the assumed pod will get expired. -// "stop" is the channel that would close the background goroutine. -func New(ttl time.Duration, stop <-chan struct{}) Cache { - cache := newSchedulerCache(ttl, cleanAssumedPeriod, stop) - cache.run() - return cache -} - -type schedulerCache struct { - stop <-chan struct{} - ttl time.Duration - period time.Duration - - // This mutex guards all fields within this cache struct. - mu sync.RWMutex - // a set of assumed pod keys. 
- // The key could further be used to get an entry in podStates. - assumedPods map[string]bool - // a map from pod key to podState. - podStates map[string]*podState - nodes map[string]*NodeInfo - nodeTree *NodeTree - pdbs map[string]*policy.PodDisruptionBudget - // A map from image name to its imageState. - imageStates map[string]*imageState -} - -type podState struct { - pod *v1.Pod - // Used by assumedPod to determinate expiration. - deadline *time.Time - // Used to block cache from expiring assumedPod if binding still runs - bindingFinished bool -} - -type imageState struct { - // Size of the image - size int64 - // A set of node names for nodes having this image present - nodes sets.String -} - -// ImageStateSummary provides summarized information about the state of an image. -type ImageStateSummary struct { - // Size of the image - Size int64 - // Used to track how many nodes have this image - NumNodes int -} - -// createImageStateSummary returns a summarizing snapshot of the given image's state. -func (cache *schedulerCache) createImageStateSummary(state *imageState) *ImageStateSummary { - return &ImageStateSummary{ - Size: state.size, - NumNodes: len(state.nodes), - } -} - -func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedulerCache { - return &schedulerCache{ - ttl: ttl, - period: period, - stop: stop, - - nodes: make(map[string]*NodeInfo), - nodeTree: newNodeTree(nil), - assumedPods: make(map[string]bool), - podStates: make(map[string]*podState), - pdbs: make(map[string]*policy.PodDisruptionBudget), - imageStates: make(map[string]*imageState), - } -} - -// Snapshot takes a snapshot of the current schedulerCache. The method has performance impact, -// and should be only used in non-critical path. -func (cache *schedulerCache) Snapshot() *Snapshot { - cache.mu.RLock() - defer cache.mu.RUnlock() - - nodes := make(map[string]*NodeInfo) - for k, v := range cache.nodes { - nodes[k] = v.Clone() - } - - assumedPods := make(map[string]bool) - for k, v := range cache.assumedPods { - assumedPods[k] = v - } - - pdbs := make(map[string]*policy.PodDisruptionBudget) - for k, v := range cache.pdbs { - pdbs[k] = v.DeepCopy() - } - - return &Snapshot{ - Nodes: nodes, - AssumedPods: assumedPods, - Pdbs: pdbs, - } -} - -func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*NodeInfo) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - for name, info := range cache.nodes { - if utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && info.TransientInfo != nil { - // Transient scheduler info is reset here. - info.TransientInfo.resetTransientSchedulerInfo() - } - if current, ok := nodeNameToInfo[name]; !ok || current.generation != info.generation { - nodeNameToInfo[name] = info.Clone() - } - } - for name := range nodeNameToInfo { - if _, ok := cache.nodes[name]; !ok { - delete(nodeNameToInfo, name) - } - } - return nil -} - -func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) { - alwaysTrue := func(p *v1.Pod) bool { return true } - return cache.FilteredList(alwaysTrue, selector) -} - -func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error) { - cache.mu.RLock() - defer cache.mu.RUnlock() - // podFilter is expected to return true for most or all of the pods. We - // can avoid expensive array growth without wasting too much memory by - // pre-allocating capacity. 
- maxSize := 0 - for _, info := range cache.nodes { - maxSize += len(info.pods) - } - pods := make([]*v1.Pod, 0, maxSize) - for _, info := range cache.nodes { - for _, pod := range info.pods { - if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) { - pods = append(pods, pod) - } - } - } - return pods, nil -} - -func (cache *schedulerCache) AssumePod(pod *v1.Pod) error { - key, err := getPodKey(pod) - if err != nil { - return err - } - - cache.mu.Lock() - defer cache.mu.Unlock() - if _, ok := cache.podStates[key]; ok { - return fmt.Errorf("pod %v is in the cache, so can't be assumed", key) - } - - cache.addPod(pod) - ps := &podState{ - pod: pod, - } - cache.podStates[key] = ps - cache.assumedPods[key] = true - return nil -} - -func (cache *schedulerCache) FinishBinding(pod *v1.Pod) error { - return cache.finishBinding(pod, time.Now()) -} - -// finishBinding exists to make tests determinitistic by injecting now as an argument -func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { - key, err := getPodKey(pod) - if err != nil { - return err - } - - cache.mu.RLock() - defer cache.mu.RUnlock() - - glog.V(5).Infof("Finished binding for pod %v. Can be expired.", key) - currState, ok := cache.podStates[key] - if ok && cache.assumedPods[key] { - dl := now.Add(cache.ttl) - currState.bindingFinished = true - currState.deadline = &dl - } - return nil -} - -func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { - key, err := getPodKey(pod) - if err != nil { - return err - } - - cache.mu.Lock() - defer cache.mu.Unlock() - - currState, ok := cache.podStates[key] - if ok && currState.pod.Spec.NodeName != pod.Spec.NodeName { - return fmt.Errorf("pod %v was assumed on %v but assigned to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) - } - - switch { - // Only assumed pod can be forgotten. - case ok && cache.assumedPods[key]: - err := cache.removePod(pod) - if err != nil { - return err - } - delete(cache.assumedPods, key) - delete(cache.podStates, key) - default: - return fmt.Errorf("pod %v wasn't assumed so cannot be forgotten", key) - } - return nil -} - -// Assumes that lock is already acquired. -func (cache *schedulerCache) addPod(pod *v1.Pod) { - n, ok := cache.nodes[pod.Spec.NodeName] - if !ok { - n = NewNodeInfo() - cache.nodes[pod.Spec.NodeName] = n - } - n.AddPod(pod) -} - -// Assumes that lock is already acquired. -func (cache *schedulerCache) updatePod(oldPod, newPod *v1.Pod) error { - if err := cache.removePod(oldPod); err != nil { - return err - } - cache.addPod(newPod) - return nil -} - -// Assumes that lock is already acquired. -func (cache *schedulerCache) removePod(pod *v1.Pod) error { - n := cache.nodes[pod.Spec.NodeName] - if err := n.RemovePod(pod); err != nil { - return err - } - if len(n.pods) == 0 && n.node == nil { - delete(cache.nodes, pod.Spec.NodeName) - } - return nil -} - -func (cache *schedulerCache) AddPod(pod *v1.Pod) error { - key, err := getPodKey(pod) - if err != nil { - return err - } - - cache.mu.Lock() - defer cache.mu.Unlock() - - currState, ok := cache.podStates[key] - switch { - case ok && cache.assumedPods[key]: - if currState.pod.Spec.NodeName != pod.Spec.NodeName { - // The pod was added to a different node than it was assumed to. - glog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) - // Clean this up. 
- cache.removePod(currState.pod) - cache.addPod(pod) - } - delete(cache.assumedPods, key) - cache.podStates[key].deadline = nil - cache.podStates[key].pod = pod - case !ok: - // Pod was expired. We should add it back. - cache.addPod(pod) - ps := &podState{ - pod: pod, - } - cache.podStates[key] = ps - default: - return fmt.Errorf("pod %v was already in added state", key) - } - return nil -} - -func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { - key, err := getPodKey(oldPod) - if err != nil { - return err - } - - cache.mu.Lock() - defer cache.mu.Unlock() - - currState, ok := cache.podStates[key] - switch { - // An assumed pod won't have Update/Remove event. It needs to have Add event - // before Update event, in which case the state would change from Assumed to Added. - case ok && !cache.assumedPods[key]: - if currState.pod.Spec.NodeName != newPod.Spec.NodeName { - glog.Errorf("Pod %v updated on a different node than previously added to.", key) - glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") - } - if err := cache.updatePod(oldPod, newPod); err != nil { - return err - } - currState.pod = newPod - default: - return fmt.Errorf("pod %v is not added to scheduler cache, so cannot be updated", key) - } - return nil -} - -func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { - key, err := getPodKey(pod) - if err != nil { - return err - } - - cache.mu.Lock() - defer cache.mu.Unlock() - - currState, ok := cache.podStates[key] - switch { - // An assumed pod won't have Delete/Remove event. It needs to have Add event - // before Remove event, in which case the state would change from Assumed to Added. - case ok && !cache.assumedPods[key]: - if currState.pod.Spec.NodeName != pod.Spec.NodeName { - glog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) - glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") - } - err := cache.removePod(currState.pod) - if err != nil { - return err - } - delete(cache.podStates, key) - default: - return fmt.Errorf("pod %v is not found in scheduler cache, so cannot be removed from it", key) - } - return nil -} - -func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) { - key, err := getPodKey(pod) - if err != nil { - return false, err - } - - cache.mu.RLock() - defer cache.mu.RUnlock() - - b, found := cache.assumedPods[key] - if !found { - return false, nil - } - return b, nil -} - -func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) { - key, err := getPodKey(pod) - if err != nil { - return nil, err - } - - cache.mu.RLock() - defer cache.mu.RUnlock() - - podState, ok := cache.podStates[key] - if !ok { - return nil, fmt.Errorf("pod %v does not exist in scheduler cache", key) - } - - return podState.pod, nil -} - -func (cache *schedulerCache) AddNode(node *v1.Node) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - n, ok := cache.nodes[node.Name] - if !ok { - n = NewNodeInfo() - cache.nodes[node.Name] = n - } else { - cache.removeNodeImageStates(n.node) - } - - cache.nodeTree.AddNode(node) - cache.addNodeImageStates(node, n) - return n.SetNode(node) -} - -func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - n, ok := cache.nodes[newNode.Name] - if !ok { - n = NewNodeInfo() - cache.nodes[newNode.Name] = n - } else { - cache.removeNodeImageStates(n.node) - } - - cache.nodeTree.UpdateNode(oldNode, newNode) - 
cache.addNodeImageStates(newNode, n) - return n.SetNode(newNode) -} - -func (cache *schedulerCache) RemoveNode(node *v1.Node) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - n := cache.nodes[node.Name] - if err := n.RemoveNode(node); err != nil { - return err - } - // We remove NodeInfo for this node only if there aren't any pods on this node. - // We can't do it unconditionally, because notifications about pods are delivered - // in a different watch, and thus can potentially be observed later, even though - // they happened before node removal. - if len(n.pods) == 0 && n.node == nil { - delete(cache.nodes, node.Name) - } - - cache.nodeTree.RemoveNode(node) - cache.removeNodeImageStates(node) - return nil -} - -// addNodeImageStates adds states of the images on given node to the given nodeInfo and update the imageStates in -// scheduler cache. This function assumes the lock to scheduler cache has been acquired. -func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *NodeInfo) { - newSum := make(map[string]*ImageStateSummary) - - for _, image := range node.Status.Images { - for _, name := range image.Names { - // update the entry in imageStates - state, ok := cache.imageStates[name] - if !ok { - state = &imageState{ - size: image.SizeBytes, - nodes: sets.NewString(node.Name), - } - cache.imageStates[name] = state - } else { - state.nodes.Insert(node.Name) - } - // create the imageStateSummary for this image - if _, ok := newSum[name]; !ok { - newSum[name] = cache.createImageStateSummary(state) - } - } - } - nodeInfo.imageStates = newSum -} - -// removeNodeImageStates removes the given node record from image entries having the node -// in imageStates cache. After the removal, if any image becomes free, i.e., the image -// is no longer available on any node, the image entry will be removed from imageStates. -func (cache *schedulerCache) removeNodeImageStates(node *v1.Node) { - if node == nil { - return - } - - for _, image := range node.Status.Images { - for _, name := range image.Names { - state, ok := cache.imageStates[name] - if ok { - state.nodes.Delete(node.Name) - if len(state.nodes) == 0 { - // Remove the unused image to make sure the length of - // imageStates represents the total number of different - // images on all nodes - delete(cache.imageStates, name) - } - } - } - } -} - -func (cache *schedulerCache) AddPDB(pdb *policy.PodDisruptionBudget) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - // Unconditionally update cache. 
- cache.pdbs[string(pdb.UID)] = pdb - return nil -} - -func (cache *schedulerCache) UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error { - return cache.AddPDB(newPDB) -} - -func (cache *schedulerCache) RemovePDB(pdb *policy.PodDisruptionBudget) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - delete(cache.pdbs, string(pdb.UID)) - return nil -} - -func (cache *schedulerCache) ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) { - cache.mu.RLock() - defer cache.mu.RUnlock() - var pdbs []*policy.PodDisruptionBudget - for _, pdb := range cache.pdbs { - if selector.Matches(labels.Set(pdb.Labels)) { - pdbs = append(pdbs, pdb) - } - } - return pdbs, nil -} - -func (cache *schedulerCache) IsUpToDate(n *NodeInfo) bool { - cache.mu.RLock() - defer cache.mu.RUnlock() - node, ok := cache.nodes[n.Node().Name] - return ok && n.generation == node.generation -} - -func (cache *schedulerCache) run() { - go wait.Until(cache.cleanupExpiredAssumedPods, cache.period, cache.stop) -} - -func (cache *schedulerCache) cleanupExpiredAssumedPods() { - cache.cleanupAssumedPods(time.Now()) -} - -// cleanupAssumedPods exists for making test deterministic by taking time as input argument. -func (cache *schedulerCache) cleanupAssumedPods(now time.Time) { - cache.mu.Lock() - defer cache.mu.Unlock() - - // The size of assumedPods should be small - for key := range cache.assumedPods { - ps, ok := cache.podStates[key] - if !ok { - panic("Key found in assumed set but not in podStates. Potentially a logical error.") - } - if !ps.bindingFinished { - glog.V(3).Infof("Couldn't expire cache for pod %v/%v. Binding is still in progress.", - ps.pod.Namespace, ps.pod.Name) - continue - } - if now.After(*ps.deadline) { - glog.Warningf("Pod %s/%s expired", ps.pod.Namespace, ps.pod.Name) - if err := cache.expirePod(key, ps); err != nil { - glog.Errorf("ExpirePod failed for %s: %v", key, err) - } - } - } -} - -func (cache *schedulerCache) expirePod(key string, ps *podState) error { - if err := cache.removePod(ps.pod); err != nil { - return err - } - delete(cache.assumedPods, key) - delete(cache.podStates, key) - return nil -} - -func (cache *schedulerCache) NodeTree() *NodeTree { - return cache.nodeTree -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/interface.go b/vendor/k8s.io/kubernetes/pkg/scheduler/cache/interface.go deleted file mode 100644 index 21eba905ef1d0..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/interface.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/labels" -) - -// PodFilter is a function to filter a pod. If pod passed return true else return false. -type PodFilter func(*v1.Pod) bool - -// Cache collects pods' information and provides node-level aggregated information. -// It's intended for generic scheduler to do efficient lookup. -// Cache's operations are pod centric. 
It does incremental updates based on pod events. -// Pod events are sent via network. We don't have guaranteed delivery of all events: -// We use Reflector to list and watch from remote. -// Reflector might be slow and do a relist, which would lead to missing events. -// -// State Machine of a pod's events in scheduler's cache: -// -// -// +-------------------------------------------+ +----+ -// | Add | | | -// | | | | Update -// + Assume Add v v | -//Initial +--------> Assumed +------------+---> Added <--+ -// ^ + + | + -// | | | | | -// | | | Add | | Remove -// | | | | | -// | | | + | -// +----------------+ +-----------> Expired +----> Deleted -// Forget Expire -// -// -// Note that an assumed pod can expire, because if we haven't received Add event notifying us -// for a while, there might be some problems and we shouldn't keep the pod in cache anymore. -// -// Note that "Initial", "Expired", and "Deleted" pods do not actually exist in cache. -// Based on existing use cases, we are making the following assumptions: -// - No pod would be assumed twice -// - A pod could be added without going through scheduler. In this case, we will see Add but not Assume event. -// - If a pod wasn't added, it wouldn't be removed or updated. -// - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue, -// a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache. -type Cache interface { - // AssumePod assumes a pod scheduled and aggregates the pod's information into its node. - // The implementation also decides the policy to expire pod before being confirmed (receiving Add event). - // After expiration, its information would be subtracted. - AssumePod(pod *v1.Pod) error - - // FinishBinding signals that cache for assumed pod can be expired - FinishBinding(pod *v1.Pod) error - - // ForgetPod removes an assumed pod from cache. - ForgetPod(pod *v1.Pod) error - - // AddPod either confirms a pod if it's assumed, or adds it back if it's expired. - // If added back, the pod's information would be added again. - AddPod(pod *v1.Pod) error - - // UpdatePod removes oldPod's information and adds newPod's information. - UpdatePod(oldPod, newPod *v1.Pod) error - - // RemovePod removes a pod. The pod's information would be subtracted from assigned node. - RemovePod(pod *v1.Pod) error - - // GetPod returns the pod from the cache with the same namespace and the - // same name of the specified pod. - GetPod(pod *v1.Pod) (*v1.Pod, error) - - // IsAssumedPod returns true if the pod is assumed and not expired. - IsAssumedPod(pod *v1.Pod) (bool, error) - - // AddNode adds overall information about node. - AddNode(node *v1.Node) error - - // UpdateNode updates overall information about node. - UpdateNode(oldNode, newNode *v1.Node) error - - // RemoveNode removes overall information about node. - RemoveNode(node *v1.Node) error - - // AddPDB adds a PodDisruptionBudget object to the cache. - AddPDB(pdb *policy.PodDisruptionBudget) error - - // UpdatePDB updates a PodDisruptionBudget object in the cache. - UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error - - // RemovePDB removes a PodDisruptionBudget object from the cache. - RemovePDB(pdb *policy.PodDisruptionBudget) error - - // List lists all cached PDBs matching the selector. - ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) - - // UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache. 
- // The node info contains aggregated information of pods scheduled (including assumed to be) - // on this node. - UpdateNodeNameToInfoMap(infoMap map[string]*NodeInfo) error - - // List lists all cached pods (including assumed ones). - List(labels.Selector) ([]*v1.Pod, error) - - // FilteredList returns all cached pods that pass the filter. - FilteredList(filter PodFilter, selector labels.Selector) ([]*v1.Pod, error) - - // Snapshot takes a snapshot on current cache - Snapshot() *Snapshot - - // IsUpToDate returns true if the given NodeInfo matches the current data in the cache. - IsUpToDate(n *NodeInfo) bool - - // NodeTree returns a node tree structure - NodeTree() *NodeTree -} - -// Snapshot is a snapshot of cache state -type Snapshot struct { - AssumedPods map[string]bool - Nodes map[string]*NodeInfo - Pdbs map[string]*policy.PodDisruptionBudget -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_info.go b/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_info.go deleted file mode 100644 index c6b5f96cd8813..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_info.go +++ /dev/null @@ -1,651 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "errors" - "fmt" - "sync" - "sync/atomic" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" - "k8s.io/kubernetes/pkg/scheduler/util" -) - -var ( - emptyResource = Resource{} - generation int64 -) - -// NodeInfo is node level aggregated information. -type NodeInfo struct { - // Overall node information. - node *v1.Node - - pods []*v1.Pod - podsWithAffinity []*v1.Pod - usedPorts util.HostPortInfo - - // Total requested resource of all pods on this node. - // It includes assumed pods which scheduler sends binding to apiserver but - // didn't get it as scheduled yet. - requestedResource *Resource - nonzeroRequest *Resource - // We store allocatedResources (which is Node.Status.Allocatable.*) explicitly - // as int64, to avoid conversions and accessing map. - allocatableResource *Resource - - // Cached taints of the node for faster lookup. - taints []v1.Taint - taintsErr error - - // imageStates holds the entry of an image if and only if this image is on the node. The entry can be used for - // checking an image's existence and advanced usage (e.g., image locality scheduling policy) based on the image - // state information. - imageStates map[string]*ImageStateSummary - - // TransientInfo holds the information pertaining to a scheduling cycle. This will be destructed at the end of - // scheduling cycle. - // TODO: @ravig. Remove this once we have a clear approach for message passing across predicates and priorities. - TransientInfo *transientSchedulerInfo - - // Cached conditions of node for faster lookup. 
- memoryPressureCondition v1.ConditionStatus - diskPressureCondition v1.ConditionStatus - pidPressureCondition v1.ConditionStatus - - // Whenever NodeInfo changes, generation is bumped. - // This is used to avoid cloning it if the object didn't change. - generation int64 -} - -//initializeNodeTransientInfo initializes transient information pertaining to node. -func initializeNodeTransientInfo() nodeTransientInfo { - return nodeTransientInfo{AllocatableVolumesCount: 0, RequestedVolumes: 0} -} - -// nextGeneration: Let's make sure history never forgets the name... -// Increments the generation number monotonically ensuring that generation numbers never collide. -// Collision of the generation numbers would be particularly problematic if a node was deleted and -// added back with the same name. See issue#63262. -func nextGeneration() int64 { - return atomic.AddInt64(&generation, 1) -} - -// nodeTransientInfo contains transient node information while scheduling. -type nodeTransientInfo struct { - // AllocatableVolumesCount contains number of volumes that could be attached to node. - AllocatableVolumesCount int - // Requested number of volumes on a particular node. - RequestedVolumes int -} - -// transientSchedulerInfo is a transient structure which is destructed at the end of each scheduling cycle. -// It consists of items that are valid for a scheduling cycle and is used for message passing across predicates and -// priorities. Some examples which could be used as fields are number of volumes being used on node, current utilization -// on node etc. -// IMPORTANT NOTE: Make sure that each field in this structure is documented along with usage. Expand this structure -// only when absolutely needed as this data structure will be created and destroyed during every scheduling cycle. -type transientSchedulerInfo struct { - TransientLock sync.Mutex - // NodeTransInfo holds the information related to nodeTransientInformation. NodeName is the key here. - TransNodeInfo nodeTransientInfo -} - -// newTransientSchedulerInfo returns a new scheduler transient structure with initialized values. -func newTransientSchedulerInfo() *transientSchedulerInfo { - tsi := &transientSchedulerInfo{ - TransNodeInfo: initializeNodeTransientInfo(), - } - return tsi -} - -// resetTransientSchedulerInfo resets the transientSchedulerInfo. -func (transientSchedInfo *transientSchedulerInfo) resetTransientSchedulerInfo() { - transientSchedInfo.TransientLock.Lock() - defer transientSchedInfo.TransientLock.Unlock() - // Reset TransientNodeInfo. - transientSchedInfo.TransNodeInfo.AllocatableVolumesCount = 0 - transientSchedInfo.TransNodeInfo.RequestedVolumes = 0 -} - -// Resource is a collection of compute resource. -type Resource struct { - MilliCPU int64 - Memory int64 - EphemeralStorage int64 - // We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value()) - // explicitly as int, to avoid conversions and improve performance. - AllowedPodNumber int - // ScalarResources - ScalarResources map[v1.ResourceName]int64 -} - -// NewResource creates a Resource from ResourceList -func NewResource(rl v1.ResourceList) *Resource { - r := &Resource{} - r.Add(rl) - return r -} - -// Add adds ResourceList into Resource. 
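Before the Resource.Add implementation that follows, the generation bookkeeping above deserves a tiny standalone sketch: nextGeneration hands out monotonically increasing values via sync/atomic, so concurrent bumps from different goroutines never collide and a stale clone can be detected by comparing numbers. The counter below mirrors that idea with standard-library code only.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// generation is a package-level counter; atomic.AddInt64 makes each
// increment safe from concurrent goroutines and guarantees the returned
// values never repeat, mirroring the nextGeneration idea above.
var generation int64

func nextGeneration() int64 {
	return atomic.AddInt64(&generation, 1)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 3; j++ {
				_ = nextGeneration()
			}
		}()
	}
	wg.Wait()
	// After 4 goroutines x 3 bumps each, the counter reads exactly 12.
	fmt.Println(atomic.LoadInt64(&generation))
}
```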
-func (r *Resource) Add(rl v1.ResourceList) { - if r == nil { - return - } - - for rName, rQuant := range rl { - switch rName { - case v1.ResourceCPU: - r.MilliCPU += rQuant.MilliValue() - case v1.ResourceMemory: - r.Memory += rQuant.Value() - case v1.ResourcePods: - r.AllowedPodNumber += int(rQuant.Value()) - case v1.ResourceEphemeralStorage: - r.EphemeralStorage += rQuant.Value() - default: - if v1helper.IsScalarResourceName(rName) { - r.AddScalar(rName, rQuant.Value()) - } - } - } -} - -// ResourceList returns a resource list of this resource. -func (r *Resource) ResourceList() v1.ResourceList { - result := v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI), - v1.ResourcePods: *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI), - v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI), - } - for rName, rQuant := range r.ScalarResources { - if v1helper.IsHugePageResourceName(rName) { - result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI) - } else { - result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI) - } - } - return result -} - -// Clone returns a copy of this resource. -func (r *Resource) Clone() *Resource { - res := &Resource{ - MilliCPU: r.MilliCPU, - Memory: r.Memory, - AllowedPodNumber: r.AllowedPodNumber, - EphemeralStorage: r.EphemeralStorage, - } - if r.ScalarResources != nil { - res.ScalarResources = make(map[v1.ResourceName]int64) - for k, v := range r.ScalarResources { - res.ScalarResources[k] = v - } - } - return res -} - -// AddScalar adds a resource by a scalar value of this resource. -func (r *Resource) AddScalar(name v1.ResourceName, quantity int64) { - r.SetScalar(name, r.ScalarResources[name]+quantity) -} - -// SetScalar sets a resource by a scalar value of this resource. -func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) { - // Lazily allocate scalar resource map. - if r.ScalarResources == nil { - r.ScalarResources = map[v1.ResourceName]int64{} - } - r.ScalarResources[name] = quantity -} - -// SetMaxResource compares with ResourceList and takes max value for each Resource. -func (r *Resource) SetMaxResource(rl v1.ResourceList) { - if r == nil { - return - } - - for rName, rQuantity := range rl { - switch rName { - case v1.ResourceMemory: - if mem := rQuantity.Value(); mem > r.Memory { - r.Memory = mem - } - case v1.ResourceCPU: - if cpu := rQuantity.MilliValue(); cpu > r.MilliCPU { - r.MilliCPU = cpu - } - case v1.ResourceEphemeralStorage: - if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage { - r.EphemeralStorage = ephemeralStorage - } - default: - if v1helper.IsScalarResourceName(rName) { - value := rQuantity.Value() - if value > r.ScalarResources[rName] { - r.SetScalar(rName, value) - } - } - } - } -} - -// NewNodeInfo returns a ready to use empty NodeInfo object. -// If any pods are given in arguments, their information will be aggregated in -// the returned object. -func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { - ni := &NodeInfo{ - requestedResource: &Resource{}, - nonzeroRequest: &Resource{}, - allocatableResource: &Resource{}, - TransientInfo: newTransientSchedulerInfo(), - generation: nextGeneration(), - usedPorts: make(util.HostPortInfo), - imageStates: make(map[string]*ImageStateSummary), - } - for _, pod := range pods { - ni.AddPod(pod) - } - return ni -} - -// Node returns overall information about this node. 
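Before the NodeInfo accessors that follow, the two resource-accounting helpers above, Add (which sums requests) and SetMaxResource (which keeps a per-resource maximum), can be sketched with a trimmed-down standalone type; the res struct here carries only two fields for illustration and is not the scheduler's full Resource.

```go
package main

import "fmt"

// res is a trimmed-down stand-in for the scheduler's Resource type.
type res struct {
	MilliCPU int64
	Memory   int64
}

// add accumulates another request into r, the way Resource.Add sums
// pod requests on a node.
func (r *res) add(other res) {
	r.MilliCPU += other.MilliCPU
	r.Memory += other.Memory
}

// setMax keeps the per-resource maximum, the way SetMaxResource does
// when merging resource lists.
func (r *res) setMax(other res) {
	if other.MilliCPU > r.MilliCPU {
		r.MilliCPU = other.MilliCPU
	}
	if other.Memory > r.Memory {
		r.Memory = other.Memory
	}
}

func main() {
	requests := []res{{MilliCPU: 250, Memory: 128 << 20}, {MilliCPU: 500, Memory: 64 << 20}}

	var total, peak res
	for _, rq := range requests {
		total.add(rq)
		peak.setMax(rq)
	}
	fmt.Printf("summed: %+v\n", total) // {MilliCPU:750 Memory:201326592}
	fmt.Printf("max:    %+v\n", peak)  // {MilliCPU:500 Memory:134217728}
}
```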
-func (n *NodeInfo) Node() *v1.Node { - if n == nil { - return nil - } - return n.node -} - -// Pods return all pods scheduled (including assumed to be) on this node. -func (n *NodeInfo) Pods() []*v1.Pod { - if n == nil { - return nil - } - return n.pods -} - -// UsedPorts returns used ports on this node. -func (n *NodeInfo) UsedPorts() util.HostPortInfo { - if n == nil { - return nil - } - return n.usedPorts -} - -// ImageStates returns the state information of all images. -func (n *NodeInfo) ImageStates() map[string]*ImageStateSummary { - if n == nil { - return nil - } - return n.imageStates -} - -// PodsWithAffinity return all pods with (anti)affinity constraints on this node. -func (n *NodeInfo) PodsWithAffinity() []*v1.Pod { - if n == nil { - return nil - } - return n.podsWithAffinity -} - -// AllowedPodNumber returns the number of the allowed pods on this node. -func (n *NodeInfo) AllowedPodNumber() int { - if n == nil || n.allocatableResource == nil { - return 0 - } - return n.allocatableResource.AllowedPodNumber -} - -// Taints returns the taints list on this node. -func (n *NodeInfo) Taints() ([]v1.Taint, error) { - if n == nil { - return nil, nil - } - return n.taints, n.taintsErr -} - -// MemoryPressureCondition returns the memory pressure condition status on this node. -func (n *NodeInfo) MemoryPressureCondition() v1.ConditionStatus { - if n == nil { - return v1.ConditionUnknown - } - return n.memoryPressureCondition -} - -// DiskPressureCondition returns the disk pressure condition status on this node. -func (n *NodeInfo) DiskPressureCondition() v1.ConditionStatus { - if n == nil { - return v1.ConditionUnknown - } - return n.diskPressureCondition -} - -// PIDPressureCondition returns the pid pressure condition status on this node. -func (n *NodeInfo) PIDPressureCondition() v1.ConditionStatus { - if n == nil { - return v1.ConditionUnknown - } - return n.pidPressureCondition -} - -// RequestedResource returns aggregated resource request of pods on this node. -func (n *NodeInfo) RequestedResource() Resource { - if n == nil { - return emptyResource - } - return *n.requestedResource -} - -// NonZeroRequest returns aggregated nonzero resource request of pods on this node. -func (n *NodeInfo) NonZeroRequest() Resource { - if n == nil { - return emptyResource - } - return *n.nonzeroRequest -} - -// AllocatableResource returns allocatable resources on a given node. -func (n *NodeInfo) AllocatableResource() Resource { - if n == nil { - return emptyResource - } - return *n.allocatableResource -} - -// SetAllocatableResource sets the allocatableResource information of given node. -func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) { - n.allocatableResource = allocatableResource - n.generation = nextGeneration() -} - -// Clone returns a copy of this node. -func (n *NodeInfo) Clone() *NodeInfo { - clone := &NodeInfo{ - node: n.node, - requestedResource: n.requestedResource.Clone(), - nonzeroRequest: n.nonzeroRequest.Clone(), - allocatableResource: n.allocatableResource.Clone(), - taintsErr: n.taintsErr, - TransientInfo: n.TransientInfo, - memoryPressureCondition: n.memoryPressureCondition, - diskPressureCondition: n.diskPressureCondition, - pidPressureCondition: n.pidPressureCondition, - usedPorts: make(util.HostPortInfo), - imageStates: n.imageStates, - generation: n.generation, - } - if len(n.pods) > 0 { - clone.pods = append([]*v1.Pod(nil), n.pods...) 
- } - if len(n.usedPorts) > 0 { - // util.HostPortInfo is a map-in-map struct - // make sure it's deep copied - for ip, portMap := range n.usedPorts { - clone.usedPorts[ip] = make(map[util.ProtocolPort]struct{}) - for protocolPort, v := range portMap { - clone.usedPorts[ip][protocolPort] = v - } - } - } - if len(n.podsWithAffinity) > 0 { - clone.podsWithAffinity = append([]*v1.Pod(nil), n.podsWithAffinity...) - } - if len(n.taints) > 0 { - clone.taints = append([]v1.Taint(nil), n.taints...) - } - return clone -} - -// VolumeLimits returns volume limits associated with the node -func (n *NodeInfo) VolumeLimits() map[v1.ResourceName]int64 { - volumeLimits := map[v1.ResourceName]int64{} - for k, v := range n.AllocatableResource().ScalarResources { - if v1helper.IsAttachableVolumeResourceName(k) { - volumeLimits[k] = v - } - } - return volumeLimits -} - -// String returns representation of human readable format of this NodeInfo. -func (n *NodeInfo) String() string { - podKeys := make([]string, len(n.pods)) - for i, pod := range n.pods { - podKeys[i] = pod.Name - } - return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v, UsedPort: %#v, AllocatableResource:%#v}", - podKeys, n.requestedResource, n.nonzeroRequest, n.usedPorts, n.allocatableResource) -} - -func hasPodAffinityConstraints(pod *v1.Pod) bool { - affinity := pod.Spec.Affinity - return affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil) -} - -// AddPod adds pod information to this NodeInfo. -func (n *NodeInfo) AddPod(pod *v1.Pod) { - res, non0CPU, non0Mem := calculateResource(pod) - n.requestedResource.MilliCPU += res.MilliCPU - n.requestedResource.Memory += res.Memory - n.requestedResource.EphemeralStorage += res.EphemeralStorage - if n.requestedResource.ScalarResources == nil && len(res.ScalarResources) > 0 { - n.requestedResource.ScalarResources = map[v1.ResourceName]int64{} - } - for rName, rQuant := range res.ScalarResources { - n.requestedResource.ScalarResources[rName] += rQuant - } - n.nonzeroRequest.MilliCPU += non0CPU - n.nonzeroRequest.Memory += non0Mem - n.pods = append(n.pods, pod) - if hasPodAffinityConstraints(pod) { - n.podsWithAffinity = append(n.podsWithAffinity, pod) - } - - // Consume ports when pods added. - n.updateUsedPorts(pod, true) - - n.generation = nextGeneration() -} - -// RemovePod subtracts pod information from this NodeInfo. 
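Before RemovePod below, note why Clone above copies usedPorts level by level: HostPortInfo is a map of maps, so copying only the outer map would leave the original and the clone sharing the inner port sets. A minimal standalone sketch of that deep copy, with plain local types standing in for util.HostPortInfo:

```go
package main

import "fmt"

type protocolPort struct {
	Protocol string
	Port     int32
}

// deepCopyPorts copies both map levels so mutations on the clone
// never show up in the original, mirroring the usedPorts copy in Clone.
func deepCopyPorts(src map[string]map[protocolPort]struct{}) map[string]map[protocolPort]struct{} {
	dst := make(map[string]map[protocolPort]struct{}, len(src))
	for ip, ports := range src {
		inner := make(map[protocolPort]struct{}, len(ports))
		for pp := range ports {
			inner[pp] = struct{}{}
		}
		dst[ip] = inner
	}
	return dst
}

func main() {
	orig := map[string]map[protocolPort]struct{}{
		"0.0.0.0": {{Protocol: "TCP", Port: 8080}: {}},
	}
	clone := deepCopyPorts(orig)
	clone["0.0.0.0"][protocolPort{Protocol: "TCP", Port: 9090}] = struct{}{}

	fmt.Println(len(orig["0.0.0.0"]), len(clone["0.0.0.0"])) // 1 2 — the original is untouched
}
```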
-func (n *NodeInfo) RemovePod(pod *v1.Pod) error { - k1, err := getPodKey(pod) - if err != nil { - return err - } - - for i := range n.podsWithAffinity { - k2, err := getPodKey(n.podsWithAffinity[i]) - if err != nil { - glog.Errorf("Cannot get pod key, err: %v", err) - continue - } - if k1 == k2 { - // delete the element - n.podsWithAffinity[i] = n.podsWithAffinity[len(n.podsWithAffinity)-1] - n.podsWithAffinity = n.podsWithAffinity[:len(n.podsWithAffinity)-1] - break - } - } - for i := range n.pods { - k2, err := getPodKey(n.pods[i]) - if err != nil { - glog.Errorf("Cannot get pod key, err: %v", err) - continue - } - if k1 == k2 { - // delete the element - n.pods[i] = n.pods[len(n.pods)-1] - n.pods = n.pods[:len(n.pods)-1] - // reduce the resource data - res, non0CPU, non0Mem := calculateResource(pod) - - n.requestedResource.MilliCPU -= res.MilliCPU - n.requestedResource.Memory -= res.Memory - n.requestedResource.EphemeralStorage -= res.EphemeralStorage - if len(res.ScalarResources) > 0 && n.requestedResource.ScalarResources == nil { - n.requestedResource.ScalarResources = map[v1.ResourceName]int64{} - } - for rName, rQuant := range res.ScalarResources { - n.requestedResource.ScalarResources[rName] -= rQuant - } - n.nonzeroRequest.MilliCPU -= non0CPU - n.nonzeroRequest.Memory -= non0Mem - - // Release ports when remove Pods. - n.updateUsedPorts(pod, false) - - n.generation = nextGeneration() - - return nil - } - } - return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name) -} - -func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) { - resPtr := &res - for _, c := range pod.Spec.Containers { - resPtr.Add(c.Resources.Requests) - - non0CPUReq, non0MemReq := priorityutil.GetNonzeroRequests(&c.Resources.Requests) - non0CPU += non0CPUReq - non0Mem += non0MemReq - // No non-zero resources for GPUs or opaque resources. - } - - return -} - -func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) { - for j := range pod.Spec.Containers { - container := &pod.Spec.Containers[j] - for k := range container.Ports { - podPort := &container.Ports[k] - if add { - n.usedPorts.Add(podPort.HostIP, string(podPort.Protocol), podPort.HostPort) - } else { - n.usedPorts.Remove(podPort.HostIP, string(podPort.Protocol), podPort.HostPort) - } - } - } -} - -// SetNode sets the overall node information. -func (n *NodeInfo) SetNode(node *v1.Node) error { - n.node = node - - n.allocatableResource = NewResource(node.Status.Allocatable) - - n.taints = node.Spec.Taints - for i := range node.Status.Conditions { - cond := &node.Status.Conditions[i] - switch cond.Type { - case v1.NodeMemoryPressure: - n.memoryPressureCondition = cond.Status - case v1.NodeDiskPressure: - n.diskPressureCondition = cond.Status - case v1.NodePIDPressure: - n.pidPressureCondition = cond.Status - default: - // We ignore other conditions. - } - } - n.TransientInfo = newTransientSchedulerInfo() - n.generation = nextGeneration() - return nil -} - -// RemoveNode removes the overall information about the node. -func (n *NodeInfo) RemoveNode(node *v1.Node) error { - // We don't remove NodeInfo for because there can still be some pods on this node - - // this is because notifications about pods are delivered in a different watch, - // and thus can potentially be observed later, even though they happened before - // node removal. This is handled correctly in cache.go file. 
- n.node = nil - n.allocatableResource = &Resource{} - n.taints, n.taintsErr = nil, nil - n.memoryPressureCondition = v1.ConditionUnknown - n.diskPressureCondition = v1.ConditionUnknown - n.pidPressureCondition = v1.ConditionUnknown - n.imageStates = make(map[string]*ImageStateSummary) - n.generation = nextGeneration() - return nil -} - -// FilterOutPods receives a list of pods and filters out those whose node names -// are equal to the node of this NodeInfo, but are not found in the pods of this NodeInfo. -// -// Preemption logic simulates removal of pods on a node by removing them from the -// corresponding NodeInfo. In order for the simulation to work, we call this method -// on the pods returned from SchedulerCache, so that predicate functions see -// only the pods that are not removed from the NodeInfo. -func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod { - node := n.Node() - if node == nil { - return pods - } - filtered := make([]*v1.Pod, 0, len(pods)) - for _, p := range pods { - if p.Spec.NodeName != node.Name { - filtered = append(filtered, p) - continue - } - // If pod is on the given node, add it to 'filtered' only if it is present in nodeInfo. - podKey, _ := getPodKey(p) - for _, np := range n.Pods() { - npodkey, _ := getPodKey(np) - if npodkey == podKey { - filtered = append(filtered, p) - break - } - } - } - return filtered -} - -// getPodKey returns the string key of a pod. -func getPodKey(pod *v1.Pod) (string, error) { - uid := string(pod.UID) - if len(uid) == 0 { - return "", errors.New("Cannot get cache key for pod with empty UID") - } - return uid, nil -} - -// Filter implements PodFilter interface. It returns false only if the pod node name -// matches NodeInfo.node and the pod is not found in the pods list. Otherwise, -// returns true. -func (n *NodeInfo) Filter(pod *v1.Pod) bool { - if pod.Spec.NodeName != n.node.Name { - return true - } - for _, p := range n.pods { - if p.Name == pod.Name && p.Namespace == pod.Namespace { - return true - } - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_tree.go b/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_tree.go deleted file mode 100644 index d5112aab53c41..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_tree.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "fmt" - "sync" - - "k8s.io/api/core/v1" - utilnode "k8s.io/kubernetes/pkg/util/node" - - "github.com/golang/glog" -) - -// NodeTree is a tree-like data structure that holds node names in each zone. Zone names are -// keys to "NodeTree.tree" and values of "NodeTree.tree" are arrays of node names. -type NodeTree struct { - tree map[string]*nodeArray // a map from zone (region-zone) to an array of nodes in the zone. - zones []string // a list of all the zones in the tree (keys) - zoneIndex int - NumNodes int - mu sync.RWMutex -} - -// nodeArray is a struct that has nodes that are in a zone. 
-// We use a slice (as opposed to a set/map) to store the nodes because iterating over the nodes is -// a lot more frequent than searching them by name. -type nodeArray struct { - nodes []string - lastIndex int -} - -func (na *nodeArray) next() (nodeName string, exhausted bool) { - if len(na.nodes) == 0 { - glog.Error("The nodeArray is empty. It should have been deleted from NodeTree.") - return "", false - } - if na.lastIndex >= len(na.nodes) { - return "", true - } - nodeName = na.nodes[na.lastIndex] - na.lastIndex++ - return nodeName, false -} - -func newNodeTree(nodes []*v1.Node) *NodeTree { - nt := &NodeTree{ - tree: make(map[string]*nodeArray), - } - for _, n := range nodes { - nt.AddNode(n) - } - return nt -} - -// AddNode adds a node and its corresponding zone to the tree. If the zone already exists, the node -// is added to the array of nodes in that zone. -func (nt *NodeTree) AddNode(n *v1.Node) { - nt.mu.Lock() - defer nt.mu.Unlock() - nt.addNode(n) -} - -func (nt *NodeTree) addNode(n *v1.Node) { - zone := utilnode.GetZoneKey(n) - if na, ok := nt.tree[zone]; ok { - for _, nodeName := range na.nodes { - if nodeName == n.Name { - glog.Warningf("node %v already exist in the NodeTree", n.Name) - return - } - } - na.nodes = append(na.nodes, n.Name) - } else { - nt.zones = append(nt.zones, zone) - nt.tree[zone] = &nodeArray{nodes: []string{n.Name}, lastIndex: 0} - } - glog.V(5).Infof("Added node %v in group %v to NodeTree", n.Name, zone) - nt.NumNodes++ -} - -// RemoveNode removes a node from the NodeTree. -func (nt *NodeTree) RemoveNode(n *v1.Node) error { - nt.mu.Lock() - defer nt.mu.Unlock() - return nt.removeNode(n) -} - -func (nt *NodeTree) removeNode(n *v1.Node) error { - zone := utilnode.GetZoneKey(n) - if na, ok := nt.tree[zone]; ok { - for i, nodeName := range na.nodes { - if nodeName == n.Name { - na.nodes = append(na.nodes[:i], na.nodes[i+1:]...) - if len(na.nodes) == 0 { - nt.removeZone(zone) - } - glog.V(5).Infof("Removed node %v in group %v from NodeTree", n.Name, zone) - nt.NumNodes-- - return nil - } - } - } - glog.Errorf("Node %v in group %v was not found", n.Name, zone) - return fmt.Errorf("node %v in group %v was not found", n.Name, zone) -} - -// removeZone removes a zone from tree. -// This function must be called while writer locks are hold. -func (nt *NodeTree) removeZone(zone string) { - delete(nt.tree, zone) - for i, z := range nt.zones { - if z == zone { - nt.zones = append(nt.zones[:i], nt.zones[i+1:]...) - } - } -} - -// UpdateNode updates a node in the NodeTree. -func (nt *NodeTree) UpdateNode(old, new *v1.Node) { - var oldZone string - if old != nil { - oldZone = utilnode.GetZoneKey(old) - } - newZone := utilnode.GetZoneKey(new) - // If the zone ID of the node has not changed, we don't need to do anything. Name of the node - // cannot be changed in an update. - if oldZone == newZone { - return - } - nt.mu.Lock() - defer nt.mu.Unlock() - nt.removeNode(old) // No error checking. We ignore whether the old node exists or not. - nt.addNode(new) -} - -func (nt *NodeTree) resetExhausted() { - for _, na := range nt.tree { - na.lastIndex = 0 - } - nt.zoneIndex = 0 -} - -// Next returns the name of the next node. NodeTree iterates over zones and in each zone iterates -// over nodes in a round robin fashion. 
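The round-robin traversal described in the comment above, and implemented by the deleted Next method that follows, can be sketched standalone: zones are visited in turn, one node is handed out per zone per visit, and once every zone is exhausted all cursors reset. The zoneTree type below is a simplified stand-in for NodeTree/nodeArray, not the real scheduler types.

```go
package main

import "fmt"

// zoneTree is a simplified stand-in for NodeTree: nodes grouped by zone,
// handed out one per zone in round-robin order.
type zoneTree struct {
	zones     []string
	nodes     map[string][]string
	lastIndex map[string]int
	zoneIdx   int
}

func (t *zoneTree) next() string {
	if len(t.zones) == 0 {
		return ""
	}
	exhausted := 0
	for {
		if t.zoneIdx >= len(t.zones) {
			t.zoneIdx = 0
		}
		zone := t.zones[t.zoneIdx]
		t.zoneIdx++
		if t.lastIndex[zone] < len(t.nodes[zone]) {
			name := t.nodes[zone][t.lastIndex[zone]]
			t.lastIndex[zone]++
			return name
		}
		// This zone is used up; once every zone is, reset all cursors.
		exhausted++
		if exhausted >= len(t.zones) {
			for z := range t.lastIndex {
				t.lastIndex[z] = 0
			}
			t.zoneIdx = 0
			exhausted = 0
		}
	}
}

func main() {
	t := &zoneTree{
		zones:     []string{"zone-a", "zone-b"},
		nodes:     map[string][]string{"zone-a": {"a1", "a2"}, "zone-b": {"b1"}},
		lastIndex: map[string]int{},
	}
	for i := 0; i < 4; i++ {
		fmt.Print(t.next(), " ") // a1 b1 a2 a1 — zones interleaved, then wrap around
	}
	fmt.Println()
}
```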
-func (nt *NodeTree) Next() string { - nt.mu.Lock() - defer nt.mu.Unlock() - if len(nt.zones) == 0 { - return "" - } - numExhaustedZones := 0 - for { - if nt.zoneIndex >= len(nt.zones) { - nt.zoneIndex = 0 - } - zone := nt.zones[nt.zoneIndex] - nt.zoneIndex++ - // We do not check the exhausted zones before calling next() on the zone. This ensures - // that if more nodes are added to a zone after it is exhausted, we iterate over the new nodes. - nodeName, exhausted := nt.tree[zone].next() - if exhausted { - numExhaustedZones++ - if numExhaustedZones >= len(nt.zones) { // all zones are exhausted. we should reset. - nt.resetExhausted() - } - } else { - return nodeName - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/util.go b/vendor/k8s.io/kubernetes/pkg/scheduler/cache/util.go deleted file mode 100644 index 5a252b6402ed4..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/cache/util.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" -) - -// CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names -// and the values are the aggregated information for that node. -func CreateNodeNameToInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*NodeInfo { - nodeNameToInfo := make(map[string]*NodeInfo) - for _, pod := range pods { - nodeName := pod.Spec.NodeName - if _, ok := nodeNameToInfo[nodeName]; !ok { - nodeNameToInfo[nodeName] = NewNodeInfo() - } - nodeNameToInfo[nodeName].AddPod(pod) - } - imageExistenceMap := createImageExistenceMap(nodes) - - for _, node := range nodes { - if _, ok := nodeNameToInfo[node.Name]; !ok { - nodeNameToInfo[node.Name] = NewNodeInfo() - } - nodeInfo := nodeNameToInfo[node.Name] - nodeInfo.SetNode(node) - nodeInfo.imageStates = getNodeImageStates(node, imageExistenceMap) - } - return nodeNameToInfo -} - -// getNodeImageStates returns the given node's image states based on the given imageExistence map. -func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*ImageStateSummary { - imageStates := make(map[string]*ImageStateSummary) - - for _, image := range node.Status.Images { - for _, name := range image.Names { - imageStates[name] = &ImageStateSummary{ - Size: image.SizeBytes, - NumNodes: len(imageExistenceMap[name]), - } - } - } - return imageStates -} - -// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names. 
-func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String { - imageExistenceMap := make(map[string]sets.String) - for _, node := range nodes { - for _, image := range node.Status.Images { - for _, name := range image.Names { - if _, ok := imageExistenceMap[name]; !ok { - imageExistenceMap[name] = sets.NewString(node.Name) - } else { - imageExistenceMap[name].Insert(node.Name) - } - } - } - } - return imageExistenceMap -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD.bazel deleted file mode 100644 index 00b073366cd8a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "backoff_utils.go", - "utils.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/scheduler/util", - importpath = "k8s.io/kubernetes/pkg/scheduler/util", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/apis/scheduling:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/features:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/util/backoff_utils.go b/vendor/k8s.io/kubernetes/pkg/scheduler/util/backoff_utils.go deleted file mode 100644 index 50920ae86c83b..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/util/backoff_utils.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - ktypes "k8s.io/apimachinery/pkg/types" - - "github.com/golang/glog" -) - -type clock interface { - Now() time.Time -} - -type realClock struct{} - -func (realClock) Now() time.Time { - return time.Now() -} - -// BackoffEntry is single threaded. in particular, it only allows a single action to be waiting on backoff at a time. -// It is expected that all users will only use the public TryWait(...) method -// It is also not safe to copy this object. -type BackoffEntry struct { - backoff time.Duration - lastUpdate time.Time - reqInFlight int32 -} - -// tryLock attempts to acquire a lock via atomic compare and swap. -// returns true if the lock was acquired, false otherwise -func (b *BackoffEntry) tryLock() bool { - return atomic.CompareAndSwapInt32(&b.reqInFlight, 0, 1) -} - -// unlock returns the lock. panics if the lock isn't held -func (b *BackoffEntry) unlock() { - if !atomic.CompareAndSwapInt32(&b.reqInFlight, 1, 0) { - panic(fmt.Sprintf("unexpected state on unlocking: %+v", b)) - } -} - -// TryWait tries to acquire the backoff lock, maxDuration is the maximum allowed period to wait for. 
-func (b *BackoffEntry) TryWait(maxDuration time.Duration) bool { - if !b.tryLock() { - return false - } - defer b.unlock() - b.wait(maxDuration) - return true -} - -func (b *BackoffEntry) getBackoff(maxDuration time.Duration) time.Duration { - duration := b.backoff - newDuration := time.Duration(duration) * 2 - if newDuration > maxDuration { - newDuration = maxDuration - } - b.backoff = newDuration - glog.V(4).Infof("Backing off %s", duration.String()) - return duration -} - -func (b *BackoffEntry) wait(maxDuration time.Duration) { - time.Sleep(b.getBackoff(maxDuration)) -} - -// PodBackoff is used to restart a pod with back-off delay. -type PodBackoff struct { - perPodBackoff map[ktypes.NamespacedName]*BackoffEntry - lock sync.Mutex - clock clock - defaultDuration time.Duration - maxDuration time.Duration -} - -// MaxDuration returns the max time duration of the back-off. -func (p *PodBackoff) MaxDuration() time.Duration { - return p.maxDuration -} - -// CreateDefaultPodBackoff creates a default pod back-off object. -func CreateDefaultPodBackoff() *PodBackoff { - return CreatePodBackoff(1*time.Second, 60*time.Second) -} - -// CreatePodBackoff creates a pod back-off object by default duration and max duration. -func CreatePodBackoff(defaultDuration, maxDuration time.Duration) *PodBackoff { - return CreatePodBackoffWithClock(defaultDuration, maxDuration, realClock{}) -} - -// CreatePodBackoffWithClock creates a pod back-off object by default duration, max duration and clock. -func CreatePodBackoffWithClock(defaultDuration, maxDuration time.Duration, clock clock) *PodBackoff { - return &PodBackoff{ - perPodBackoff: map[ktypes.NamespacedName]*BackoffEntry{}, - clock: clock, - defaultDuration: defaultDuration, - maxDuration: maxDuration, - } -} - -// GetEntry returns a back-off entry by Pod ID. -func (p *PodBackoff) GetEntry(podID ktypes.NamespacedName) *BackoffEntry { - p.lock.Lock() - defer p.lock.Unlock() - entry, ok := p.perPodBackoff[podID] - if !ok { - entry = &BackoffEntry{backoff: p.defaultDuration} - p.perPodBackoff[podID] = entry - } - entry.lastUpdate = p.clock.Now() - return entry -} - -// Gc execute garbage collection on the pod back-off. -func (p *PodBackoff) Gc() { - p.lock.Lock() - defer p.lock.Unlock() - now := p.clock.Now() - for podID, entry := range p.perPodBackoff { - if now.Sub(entry.lastUpdate) > p.maxDuration { - delete(p.perPodBackoff, podID) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go b/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go deleted file mode 100644 index 731ad7a014551..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go +++ /dev/null @@ -1,213 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "sort" - - "k8s.io/api/core/v1" - "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/apis/scheduling" - "k8s.io/kubernetes/pkg/features" -) - -// DefaultBindAllHostIP defines the default ip address used to bind to all host. 
-const DefaultBindAllHostIP = "0.0.0.0" - -// ProtocolPort represents a protocol port pair, e.g. tcp:80. -type ProtocolPort struct { - Protocol string - Port int32 -} - -// NewProtocolPort creates a ProtocolPort instance. -func NewProtocolPort(protocol string, port int32) *ProtocolPort { - pp := &ProtocolPort{ - Protocol: protocol, - Port: port, - } - - if len(pp.Protocol) == 0 { - pp.Protocol = string(v1.ProtocolTCP) - } - - return pp -} - -// HostPortInfo stores mapping from ip to a set of ProtocolPort -type HostPortInfo map[string]map[ProtocolPort]struct{} - -// Add adds (ip, protocol, port) to HostPortInfo -func (h HostPortInfo) Add(ip, protocol string, port int32) { - if port <= 0 { - return - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - if _, ok := h[ip]; !ok { - h[ip] = map[ProtocolPort]struct{}{ - *pp: {}, - } - return - } - - h[ip][*pp] = struct{}{} -} - -// Remove removes (ip, protocol, port) from HostPortInfo -func (h HostPortInfo) Remove(ip, protocol string, port int32) { - if port <= 0 { - return - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - if m, ok := h[ip]; ok { - delete(m, *pp) - if len(h[ip]) == 0 { - delete(h, ip) - } - } -} - -// Len returns the total number of (ip, protocol, port) tuple in HostPortInfo -func (h HostPortInfo) Len() int { - length := 0 - for _, m := range h { - length += len(m) - } - return length -} - -// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing -// ones in HostPortInfo. -func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool { - if port <= 0 { - return false - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - - // If ip is 0.0.0.0 check all IP's (protocol, port) pair - if ip == DefaultBindAllHostIP { - for _, m := range h { - if _, ok := m[*pp]; ok { - return true - } - } - return false - } - - // If ip isn't 0.0.0.0, only check IP and 0.0.0.0's (protocol, port) pair - for _, key := range []string{DefaultBindAllHostIP, ip} { - if m, ok := h[key]; ok { - if _, ok2 := m[*pp]; ok2 { - return true - } - } - } - - return false -} - -// sanitize the parameters -func (h HostPortInfo) sanitize(ip, protocol *string) { - if len(*ip) == 0 { - *ip = DefaultBindAllHostIP - } - if len(*protocol) == 0 { - *protocol = string(v1.ProtocolTCP) - } -} - -// GetContainerPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair -// will be in the result; but it does not resolve port conflict. -func GetContainerPorts(pods ...*v1.Pod) []*v1.ContainerPort { - var ports []*v1.ContainerPort - for _, pod := range pods { - for j := range pod.Spec.Containers { - container := &pod.Spec.Containers[j] - for k := range container.Ports { - ports = append(ports, &container.Ports[k]) - } - } - } - return ports -} - -// PodPriorityEnabled indicates whether pod priority feature is enabled. -func PodPriorityEnabled() bool { - return feature.DefaultFeatureGate.Enabled(features.PodPriority) -} - -// GetPodFullName returns a name that uniquely identifies a pod. -func GetPodFullName(pod *v1.Pod) string { - // Use underscore as the delimiter because it is not allowed in pod name - // (DNS subdomain format). - return pod.Name + "_" + pod.Namespace -} - -// GetPodPriority return priority of the given pod. 
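HostPortInfo, defined in the utils.go hunk above, records (ip, protocol, port) tuples and treats 0.0.0.0 as a wildcard when checking conflicts. A small self-contained sketch of that behaviour, again assuming the pre-removal vendored import path; the IPs and ports are illustrative:

package main

import (
	"fmt"

	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

func main() {
	used := make(schedutil.HostPortInfo)

	// Pod A binds tcp:8080 on a specific node IP.
	used.Add("10.0.0.5", "TCP", 8080)

	// A request for the same port on the wildcard address conflicts,
	// because 0.0.0.0 is checked against every recorded IP.
	fmt.Println(used.CheckConflict("0.0.0.0", "TCP", 8080)) // true

	// A different IP or a different port does not conflict.
	fmt.Println(used.CheckConflict("10.0.0.6", "TCP", 8080)) // false
	fmt.Println(used.CheckConflict("10.0.0.5", "TCP", 9090)) // false

	used.Remove("10.0.0.5", "TCP", 8080)
	fmt.Println(used.Len()) // 0
}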
-func GetPodPriority(pod *v1.Pod) int32 { - if pod.Spec.Priority != nil { - return *pod.Spec.Priority - } - // When priority of a running pod is nil, it means it was created at a time - // that there was no global default priority class and the priority class - // name of the pod was empty. So, we resolve to the static default priority. - return scheduling.DefaultPriorityWhenNoDefaultClassExists -} - -// SortableList is a list that implements sort.Interface. -type SortableList struct { - Items []interface{} - CompFunc LessFunc -} - -// LessFunc is a function that receives two items and returns true if the first -// item should be placed before the second one when the list is sorted. -type LessFunc func(item1, item2 interface{}) bool - -var _ = sort.Interface(&SortableList{}) - -func (l *SortableList) Len() int { return len(l.Items) } - -func (l *SortableList) Less(i, j int) bool { - return l.CompFunc(l.Items[i], l.Items[j]) -} - -func (l *SortableList) Swap(i, j int) { - l.Items[i], l.Items[j] = l.Items[j], l.Items[i] -} - -// Sort sorts the items in the list using the given CompFunc. Item1 is placed -// before Item2 when CompFunc(Item1, Item2) returns true. -func (l *SortableList) Sort() { - sort.Sort(l) -} - -// HigherPriorityPod return true when priority of the first pod is higher than -// the second one. It takes arguments of the type "interface{}" to be used with -// SortableList, but expects those arguments to be *v1.Pod. -func HigherPriorityPod(pod1, pod2 interface{}) bool { - return GetPodPriority(pod1.(*v1.Pod)) > GetPodPriority(pod2.(*v1.Pod)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go index 740698f205c44..25ea591fca08a 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go +++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go @@ -36,7 +36,7 @@ import ( // Set to true if the wrong build tags are set (see validate_disabled.go). var isDisabledBuild bool -// Interface for validating that a pod with with an AppArmor profile can be run by a Node. +// Interface for validating that a pod with an AppArmor profile can be run by a Node. 
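GetPodPriority, SortableList and HigherPriorityPod above combine into a simple priority ordering for pods. A short sketch of that combination, assuming the pre-removal vendored import path; podWithPriority is a hypothetical helper used only to build fixtures:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

func podWithPriority(name string, prio int32) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"},
		Spec:       v1.PodSpec{Priority: &prio},
	}
}

func main() {
	pods := schedutil.SortableList{
		Items:    []interface{}{podWithPriority("low", 10), podWithPriority("high", 1000)},
		CompFunc: schedutil.HigherPriorityPod,
	}

	// Sort places higher-priority pods first, since HigherPriorityPod
	// returns true when the first argument outranks the second.
	pods.Sort()

	for _, item := range pods.Items {
		pod := item.(*v1.Pod)
		fmt.Println(schedutil.GetPodFullName(pod), schedutil.GetPodPriority(pod))
	}
}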
type Validator interface { Validate(pod *v1.Pod) error ValidateHost() error diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD.bazel index c66789047cc49..edafb90297ebe 100644 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD.bazel @@ -12,7 +12,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/serviceaccount", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/gopkg.in/square/go-jose.v2:go_default_library", "//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -20,6 +19,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/OWNERS b/vendor/k8s.io/kubernetes/pkg/serviceaccount/OWNERS index b2ae9a65995d7..d914c0d7195a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/OWNERS @@ -1,10 +1,7 @@ approvers: -- liggitt -- deads2k -- mikedanese +- sig-auth-serviceaccounts-approvers reviewers: -- liggitt -- deads2k -- mikedanese -- ericchiang -- enj +- sig-auth-serviceaccounts-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go index 070418ffc8cf3..3d48b6f2dce42 100644 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go @@ -21,11 +21,11 @@ import ( "fmt" "time" - "github.com/golang/glog" + "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/klog" + apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/kubernetes/pkg/apis/core" - - "gopkg.in/square/go-jose.v2/jwt" ) // time.Now stubbed out to allow testing @@ -80,15 +80,13 @@ func Claims(sa core.ServiceAccount, pod *core.Pod, secret *core.Secret, expirati return sc, pc } -func NewValidator(audiences []string, getter ServiceAccountTokenGetter) Validator { +func NewValidator(getter ServiceAccountTokenGetter) Validator { return &validator{ - auds: audiences, getter: getter, } } type validator struct { - auds []string getter ServiceAccountTokenGetter } @@ -97,7 +95,7 @@ var _ = Validator(&validator{}) func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{}) (*ServiceAccountInfo, error) { private, ok := privateObj.(*privateClaims) if !ok { - glog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj) + klog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj) return nil, errors.New("Token could not be validated.") } err := public.Validate(jwt.Expected{ @@ -108,23 +106,10 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ case err == jwt.ErrExpired: return nil, errors.New("Token has expired.") default: - glog.Errorf("unexpected validation error: %T", err) + klog.Errorf("unexpected validation error: %T", err) return nil, errors.New("Token could not be validated.") } - var audValid bool - - for _, aud := range v.auds { - audValid = 
public.Audience.Contains(aud) - if audValid { - break - } - } - - if !audValid { - return nil, errors.New("Token is invalid for this audience.") - } - namespace := private.Kubernetes.Namespace saref := private.Kubernetes.Svcacct podref := private.Kubernetes.Pod @@ -132,15 +117,15 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ // Make sure service account still exists (name and UID) serviceAccount, err := v.getter.GetServiceAccount(namespace, saref.Name) if err != nil { - glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err) + klog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err) return nil, err } if serviceAccount.DeletionTimestamp != nil { - glog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name) + klog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name) return nil, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, saref.Name) } if string(serviceAccount.UID) != saref.UID { - glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, saref.Name, string(serviceAccount.UID), saref.UID) + klog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, saref.Name, string(serviceAccount.UID), saref.UID) return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, saref.UID) } @@ -148,15 +133,15 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ // Make sure token hasn't been invalidated by deletion of the secret secret, err := v.getter.GetSecret(namespace, secref.Name) if err != nil { - glog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, secref.Name, namespace, saref.Name, err) + klog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, secref.Name, namespace, saref.Name, err) return nil, errors.New("Token has been invalidated") } if secret.DeletionTimestamp != nil { - glog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name) + klog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name) return nil, errors.New("Token has been invalidated") } if secref.UID != string(secret.UID) { - glog.V(4).Infof("Secret UID no longer matches %s/%s: %q != %q", namespace, secref.Name, string(secret.UID), secref.UID) + klog.V(4).Infof("Secret UID no longer matches %s/%s: %q != %q", namespace, secref.Name, string(secret.UID), secref.UID) return nil, fmt.Errorf("Secret UID (%s) does not match claim (%s)", secret.UID, secref.UID) } } @@ -166,15 +151,15 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ // Make sure token hasn't been invalidated by deletion of the pod pod, err := v.getter.GetPod(namespace, podref.Name) if err != nil { - glog.V(4).Infof("Could not retrieve bound pod %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err) + klog.V(4).Infof("Could not retrieve bound pod %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err) return nil, errors.New("Token has been invalidated") } if pod.DeletionTimestamp != nil { - glog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name) + 
klog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name) return nil, errors.New("Token has been invalidated") } if podref.UID != string(pod.UID) { - glog.V(4).Infof("Pod UID no longer matches %s/%s: %q != %q", namespace, podref.Name, string(pod.UID), podref.UID) + klog.V(4).Infof("Pod UID no longer matches %s/%s: %q != %q", namespace, podref.Name, string(pod.UID), podref.UID) return nil, fmt.Errorf("Pod UID (%s) does not match claim (%s)", pod.UID, podref.UID) } podName = podref.Name diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go index fe4dc5b70359b..233fdee2d68e9 100644 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go @@ -17,6 +17,7 @@ limitations under the License. package serviceaccount import ( + "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rsa" @@ -25,13 +26,12 @@ import ( "fmt" "strings" + jose "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/authentication/authenticator" - "k8s.io/apiserver/pkg/authentication/user" - - jose "gopkg.in/square/go-jose.v2" - "gopkg.in/square/go-jose.v2/jwt" ) // ServiceAccountTokenGetter defines functions to retrieve a named service account and secret @@ -111,21 +111,23 @@ func (j *jwtTokenGenerator) GenerateToken(claims *jwt.Claims, privateClaims inte // JWTTokenAuthenticator authenticates tokens as JWT tokens produced by JWTTokenGenerator // Token signatures are verified using each of the given public keys until one works (allowing key rotation) // If lookup is true, the service account and secret referenced as claims inside the token are retrieved and verified with the provided ServiceAccountTokenGetter -func JWTTokenAuthenticator(iss string, keys []interface{}, validator Validator) authenticator.Token { +func JWTTokenAuthenticator(iss string, keys []interface{}, implicitAuds authenticator.Audiences, validator Validator) authenticator.Token { return &jwtTokenAuthenticator{ - iss: iss, - keys: keys, - validator: validator, + iss: iss, + keys: keys, + implicitAuds: implicitAuds, + validator: validator, } } type jwtTokenAuthenticator struct { - iss string - keys []interface{} - validator Validator + iss string + keys []interface{} + validator Validator + implicitAuds authenticator.Audiences } -// Validator is called by the JWT token authentictaor to apply domain specific +// Validator is called by the JWT token authenticator to apply domain specific // validation to a token and extract user information. type Validator interface { // Validate validates a token and returns user information or an error. 
@@ -140,7 +142,7 @@ type Validator interface { NewPrivateClaims() interface{} } -func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info, bool, error) { +func (j *jwtTokenAuthenticator) AuthenticateToken(ctx context.Context, tokenData string) (*authenticator.Response, bool, error) { if !j.hasCorrectIssuer(tokenData) { return nil, false, nil } @@ -170,6 +172,23 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info, return nil, false, utilerrors.NewAggregate(errlist) } + tokenAudiences := authenticator.Audiences(public.Audience) + if len(tokenAudiences) == 0 { + // only apiserver audiences are allowed for legacy tokens + tokenAudiences = j.implicitAuds + } + + requestedAudiences, ok := authenticator.AudiencesFrom(ctx) + if !ok { + // default to apiserver audiences + requestedAudiences = j.implicitAuds + } + + auds := authenticator.Audiences(tokenAudiences).Intersect(requestedAudiences) + if len(auds) == 0 && len(j.implicitAuds) != 0 { + return nil, false, fmt.Errorf("token audiences %q is invalid for the target audiences %q", tokenAudiences, requestedAudiences) + } + // If we get here, we have a token with a recognized signature and // issuer string. sa, err := j.validator.Validate(tokenData, public, private) @@ -177,7 +196,10 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info, return nil, false, err } - return sa.UserInfo(), true, nil + return &authenticator.Response{ + User: sa.UserInfo(), + Audiences: auds, + }, true, nil } // hasCorrectIssuer returns true if tokenData is a valid JWT in compact diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go index 79ca8f1b851d5..57c482f0ba62d 100644 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go @@ -21,11 +21,11 @@ import ( "errors" "fmt" + "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/klog" + "k8s.io/api/core/v1" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" - - "github.com/golang/glog" - "gopkg.in/square/go-jose.v2/jwt" ) func LegacyClaims(serviceAccount v1.ServiceAccount, secret v1.Secret) (*jwt.Claims, interface{}) { @@ -65,7 +65,7 @@ var _ = Validator(&legacyValidator{}) func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, privateObj interface{}) (*ServiceAccountInfo, error) { private, ok := privateObj.(*legacyPrivateClaims) if !ok { - glog.Errorf("jwt validator expected private claim of type *legacyPrivateClaims but got: %T", privateObj) + klog.Errorf("jwt validator expected private claim of type *legacyPrivateClaims but got: %T", privateObj) return nil, errors.New("Token could not be validated.") } @@ -99,30 +99,30 @@ func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, private // Make sure token hasn't been invalidated by deletion of the secret secret, err := v.getter.GetSecret(namespace, secretName) if err != nil { - glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err) + klog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err) return nil, errors.New("Token has been invalidated") } if secret.DeletionTimestamp != nil { - glog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) + 
klog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) return nil, errors.New("Token has been invalidated") } if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) != 0 { - glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) + klog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) return nil, errors.New("Token does not match server's copy") } // Make sure service account still exists (name and UID) serviceAccount, err := v.getter.GetServiceAccount(namespace, serviceAccountName) if err != nil { - glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err) + klog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err) return nil, err } if serviceAccount.DeletionTimestamp != nil { - glog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName) + klog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName) return nil, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName) } if string(serviceAccount.UID) != serviceAccountUID { - glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID) + klog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID) return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID) } } diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/.readonly b/vendor/k8s.io/kubernetes/pkg/util/labels/.readonly deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD.bazel deleted file mode 100644 index c2e929f5494b4..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "labels.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/util/labels", - importpath = "k8s.io/kubernetes/pkg/util/labels", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go b/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go deleted file mode 100644 index 0ce48cfb52355..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package labels - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Clones the given map and returns a new map with the given key and value added. -// Returns the given map, if labelKey is empty. -func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map[string]string { - if labelKey == "" { - // Don't need to add a label. - return labels - } - // Clone. - newLabels := map[string]string{} - for key, value := range labels { - newLabels[key] = value - } - newLabels[labelKey] = labelValue - return newLabels -} - -// CloneAndRemoveLabel clones the given map and returns a new map with the given key removed. -// Returns the given map, if labelKey is empty. -func CloneAndRemoveLabel(labels map[string]string, labelKey string) map[string]string { - if labelKey == "" { - // Don't need to add a label. - return labels - } - // Clone. - newLabels := map[string]string{} - for key, value := range labels { - newLabels[key] = value - } - delete(newLabels, labelKey) - return newLabels -} - -// AddLabel returns a map with the given key and value added to the given map. -func AddLabel(labels map[string]string, labelKey, labelValue string) map[string]string { - if labelKey == "" { - // Don't need to add a label. - return labels - } - if labels == nil { - labels = make(map[string]string) - } - labels[labelKey] = labelValue - return labels -} - -// Clones the given selector and returns a new selector with the given key and value added. -// Returns the given selector, if labelKey is empty. -func CloneSelectorAndAddLabel(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector { - if labelKey == "" { - // Don't need to add a label. - return selector - } - - // Clone. - newSelector := new(metav1.LabelSelector) - - // TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here. - newSelector.MatchLabels = make(map[string]string) - if selector.MatchLabels != nil { - for key, val := range selector.MatchLabels { - newSelector.MatchLabels[key] = val - } - } - newSelector.MatchLabels[labelKey] = labelValue - - if selector.MatchExpressions != nil { - newMExps := make([]metav1.LabelSelectorRequirement, len(selector.MatchExpressions)) - for i, me := range selector.MatchExpressions { - newMExps[i].Key = me.Key - newMExps[i].Operator = me.Operator - if me.Values != nil { - newMExps[i].Values = make([]string, len(me.Values)) - copy(newMExps[i].Values, me.Values) - } else { - newMExps[i].Values = nil - } - } - newSelector.MatchExpressions = newMExps - } else { - newSelector.MatchExpressions = nil - } - - return newSelector -} - -// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels. -func AddLabelToSelector(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector { - if labelKey == "" { - // Don't need to add a label. 
- return selector - } - if selector.MatchLabels == nil { - selector.MatchLabels = make(map[string]string) - } - selector.MatchLabels[labelKey] = labelValue - return selector -} - -// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels -func SelectorHasLabel(selector *metav1.LabelSelector, labelKey string) bool { - return len(selector.MatchLabels[labelKey]) > 0 -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD.bazel index 154ce4eae2930..3e46d618498a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "exec_mount_unsupported.go", "fake.go", "mount.go", + "mount_helper.go", "mount_linux.go", "mount_unsupported.go", "mount_windows.go", @@ -19,7 +20,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/util/mount", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go index 3c6638328f69e..634189dea9bc1 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go @@ -22,7 +22,7 @@ import ( "fmt" "os" - "github.com/golang/glog" + "k8s.io/klog" ) // ExecMounter is a mounter that uses provided Exec interface to mount and @@ -44,10 +44,10 @@ var _ Interface = &execMounter{} // Mount runs mount(8) using given exec interface. func (m *execMounter) Mount(source string, target string, fstype string, options []string) error { - bind, bindRemountOpts := isBind(options) + bind, bindOpts, bindRemountOpts := isBind(options) if bind { - err := m.doExecMount(source, target, fstype, []string{"bind"}) + err := m.doExecMount(source, target, fstype, bindOpts) if err != nil { return err } @@ -59,10 +59,10 @@ func (m *execMounter) Mount(source string, target string, fstype string, options // doExecMount calls exec(mount ) using given exec interface. func (m *execMounter) doExecMount(source, target, fstype string, options []string) error { - glog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options) + klog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options) mountArgs := makeMountArgs(source, target, fstype, options) output, err := m.exec.Run("mount", mountArgs...) 
- glog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output)) + klog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output)) if err != nil { return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n", err, "mount", source, target, fstype, options, string(output)) @@ -75,9 +75,9 @@ func (m *execMounter) doExecMount(source, target, fstype string, options []strin func (m *execMounter) Unmount(target string) error { outputBytes, err := m.exec.Run("umount", target) if err == nil { - glog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes)) + klog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes)) } else { - glog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes)) + klog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes)) } return err diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go index e834e297b3eb4..0e2952f3e02df 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go @@ -22,7 +22,7 @@ import ( "path/filepath" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // FakeMounter implements mount.Interface for tests. @@ -30,6 +30,8 @@ type FakeMounter struct { MountPoints []MountPoint Log []FakeAction Filesystem map[string]FileType + // Error to return for a path when calling IsLikelyNotMountPoint + MountCheckErrors map[string]error // Some tests run things in parallel, make sure the mounter does not produce // any golang's DATA RACE warnings. mutex sync.Mutex @@ -83,11 +85,8 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options } } } - // find 'ro' option - if option == "ro" { - // reuse MountPoint.Opts field to mark mount as readonly - opts = append(opts, "ro") - } + // reuse MountPoint.Opts field to mark mount as readonly + opts = append(opts, option) } // If target is a symlink, get its absolute path @@ -95,9 +94,8 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options if err != nil { absTarget = target } - f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype, Opts: opts}) - glog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget) + klog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget) f.Log = append(f.Log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype}) return nil } @@ -115,7 +113,7 @@ func (f *FakeMounter) Unmount(target string) error { newMountpoints := []MountPoint{} for _, mp := range f.MountPoints { if mp.Path == absTarget { - glog.V(5).Infof("Fake mounter: unmounted %s from %s", mp.Device, absTarget) + klog.V(5).Infof("Fake mounter: unmounted %s from %s", mp.Device, absTarget) // Don't copy it to newMountpoints continue } @@ -123,6 +121,7 @@ func (f *FakeMounter) Unmount(target string) error { } f.MountPoints = newMountpoints f.Log = append(f.Log, FakeAction{Action: FakeActionUnmount, Target: absTarget}) + delete(f.MountCheckErrors, target) return nil } @@ -145,7 +144,12 @@ func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) { f.mutex.Lock() defer f.mutex.Unlock() - _, err := os.Stat(file) + err := f.MountCheckErrors[file] + if err != nil { + return false, err + } + + _, err = os.Stat(file) if err != nil { return true, err } @@ -158,11 +162,11 @@ func (f *FakeMounter) 
IsLikelyNotMountPoint(file string) (bool, error) { for _, mp := range f.MountPoints { if mp.Path == absFile { - glog.V(5).Infof("isLikelyNotMountPoint for %s: mounted %s, false", file, mp.Path) + klog.V(5).Infof("isLikelyNotMountPoint for %s: mounted %s, false", file, mp.Path) return false, nil } } - glog.V(5).Infof("isLikelyNotMountPoint for %s: true", file) + klog.V(5).Infof("isLikelyNotMountPoint for %s: true", file) return true, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go index b48caaffbb603..48dfde3da4d2e 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go @@ -267,6 +267,13 @@ func IsNotMountPoint(mounter Interface, file string) (bool, error) { if notMnt == false { return notMnt, nil } + + // Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts + resolvedFile, err := mounter.EvalHostSymlinks(file) + if err != nil { + return true, err + } + // check all mountpoints since IsLikelyNotMountPoint // is not reliable for some mountpoint types mountPoints, mountPointsErr := mounter.List() @@ -274,7 +281,7 @@ func IsNotMountPoint(mounter Interface, file string) (bool, error) { return notMnt, mountPointsErr } for _, mp := range mountPoints { - if mounter.IsMountPointMatch(mp, file) { + if mounter.IsMountPointMatch(mp, resolvedFile) { notMnt = false break } @@ -286,7 +293,7 @@ func IsNotMountPoint(mounter Interface, file string) (bool, error) { // use in case of bind mount, due to the fact that bind mount doesn't respect mount options. // The list equals: // options - 'bind' + 'remount' (no duplicate) -func isBind(options []string) (bool, []string) { +func isBind(options []string) (bool, []string, []string) { // Because we have an FD opened on the subpath bind mount, the "bind" option // needs to be included, otherwise the mount target will error as busy if you // remount as readonly. @@ -295,22 +302,36 @@ func isBind(options []string) (bool, []string) { // volume mount to be read only. bindRemountOpts := []string{"bind", "remount"} bind := false + bindOpts := []string{"bind"} - if len(options) != 0 { - for _, option := range options { - switch option { - case "bind": - bind = true - break - case "remount": - break - default: - bindRemountOpts = append(bindRemountOpts, option) - } + // _netdev is a userspace mount option and does not automatically get added when + // bind mount is created and hence we must carry it over. + if checkForNetDev(options) { + bindOpts = append(bindOpts, "_netdev") + } + + for _, option := range options { + switch option { + case "bind": + bind = true + break + case "remount": + break + default: + bindRemountOpts = append(bindRemountOpts, option) } } - return bind, bindRemountOpts + return bind, bindOpts, bindRemountOpts +} + +func checkForNetDev(options []string) bool { + for _, option := range options { + if option == "_netdev" { + return true + } + } + return false } // TODO: this is a workaround for the unmount device issue caused by gci mounter. diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper.go new file mode 100644 index 0000000000000..42b002059839d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper.go @@ -0,0 +1,124 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "fmt" + "os" + "syscall" + + "k8s.io/klog" +) + +// CleanupMountPoint unmounts the given path and +// deletes the remaining directory if successful. +// if extensiveMountPointCheck is true +// IsNotMountPoint will be called instead of IsLikelyNotMountPoint. +// IsNotMountPoint is more expensive but properly handles bind mounts within the same fs. +func CleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool) error { + // mounter.ExistsPath cannot be used because for containerized kubelet, we need to check + // the path in the kubelet container, not on the host. + pathExists, pathErr := PathExists(mountPath) + if !pathExists { + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath) + return nil + } + corruptedMnt := IsCorruptedMnt(pathErr) + if pathErr != nil && !corruptedMnt { + return fmt.Errorf("Error checking path: %v", pathErr) + } + return doCleanupMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt) +} + +// doCleanupMountPoint unmounts the given path and +// deletes the remaining directory if successful. +// if extensiveMountPointCheck is true +// IsNotMountPoint will be called instead of IsLikelyNotMountPoint. +// IsNotMountPoint is more expensive but properly handles bind mounts within the same fs. +// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, and the mount point check +// will be skipped +func doCleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool, corruptedMnt bool) error { + if !corruptedMnt { + var notMnt bool + var err error + if extensiveMountPointCheck { + notMnt, err = IsNotMountPoint(mounter, mountPath) + } else { + notMnt, err = mounter.IsLikelyNotMountPoint(mountPath) + } + + if err != nil { + return err + } + + if notMnt { + klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) + return os.Remove(mountPath) + } + } + + // Unmount the mount path + klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath) + if err := mounter.Unmount(mountPath); err != nil { + return err + } + + notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath) + if mntErr != nil { + return mntErr + } + if notMnt { + klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) + return os.Remove(mountPath) + } + return fmt.Errorf("Failed to unmount path %v", mountPath) +} + +// TODO: clean this up to use pkg/util/file/FileExists +// PathExists returns true if the specified path exists. 
+func PathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } else if os.IsNotExist(err) { + return false, nil + } else if IsCorruptedMnt(err) { + return true, err + } else { + return false, err + } +} + +// IsCorruptedMnt return true if err is about corrupted mount point +func IsCorruptedMnt(err error) bool { + if err == nil { + return false + } + var underlyingError error + switch pe := err.(type) { + case nil: + return false + case *os.PathError: + underlyingError = pe.Err + case *os.LinkError: + underlyingError = pe.Err + case *os.SyscallError: + underlyingError = pe.Err + } + + return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go index 1df073218f90b..2ab4d1059ed1a 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go @@ -30,9 +30,9 @@ import ( "strings" "syscall" - "github.com/golang/glog" "golang.org/x/sys/unix" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" utilfile "k8s.io/kubernetes/pkg/util/file" utilio "k8s.io/kubernetes/pkg/util/io" utilexec "k8s.io/utils/exec" @@ -55,6 +55,7 @@ const ( fsckErrorsUncorrected = 4 // place for subpath mounts + // TODO: pass in directory using kubelet_getters instead containerSubPathDirectoryName = "volume-subpaths" // syscall.Openat flags used to traverse directories not following symlinks nofollowFlags = unix.O_RDONLY | unix.O_NOFOLLOW @@ -89,9 +90,9 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio // Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty. // All Linux distros are expected to be shipped with a mount utility that a support bind mounts. mounterPath := "" - bind, bindRemountOpts := isBind(options) + bind, bindOpts, bindRemountOpts := isBind(options) if bind { - err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, []string{"bind"}) + err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts) if err != nil { return err } @@ -143,12 +144,12 @@ func (m *Mounter) doMount(mounterPath string, mountCmd string, source string, ta // No code here, mountCmd and mountArgs are already populated. } - glog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs) + klog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs) command := exec.Command(mountCmd, mountArgs...) output, err := command.CombinedOutput() if err != nil { args := strings.Join(mountArgs, " ") - glog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n", err, mountCmd, args, string(output)) + klog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n", err, mountCmd, args, string(output)) return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n", err, mountCmd, args, string(output)) } @@ -161,7 +162,7 @@ func (m *Mounter) doMount(mounterPath string, mountCmd string, source string, ta // systemd-runs (needed by Mount()) works. 
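The mount_helper.go file added above gives callers a single entry point for tearing down a mount: CleanupMountPoint unmounts the target and removes the directory, while IsCorruptedMnt classifies stale or disconnected mounts (ENOTCONN, ESTALE, EIO, EACCES) so they can still be cleaned up. A minimal sketch of how a volume plugin's teardown path might call into it; the target path is hypothetical and error handling is reduced to logging:

package main

import (
	"os"

	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/util/mount"
)

func main() {
	mounter := mount.New("") // default mount/umount binaries
	target := "/var/lib/kubelet/pods/uid/volumes/kubernetes.io~nfs/data"

	// IsCorruptedMnt distinguishes a stale or disconnected mount from an
	// ordinary stat failure, so the caller can still attempt an unmount.
	if _, err := os.Stat(target); mount.IsCorruptedMnt(err) {
		klog.Warningf("%s looks like a corrupted mount: %v", target, err)
	}

	// extensiveMountPointCheck=true uses IsNotMountPoint, which also
	// detects bind mounts that live on the same filesystem.
	if err := mount.CleanupMountPoint(target, mounter, true); err != nil {
		klog.Errorf("failed to clean up %s: %v", target, err)
	}
}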
func detectSystemd() bool { if _, err := exec.LookPath("systemd-run"); err != nil { - glog.V(2).Infof("Detected OS without systemd") + klog.V(2).Infof("Detected OS without systemd") return false } // Try to run systemd-run --scope /bin/true, that should be enough @@ -171,12 +172,12 @@ func detectSystemd() bool { cmd := exec.Command("systemd-run", "--description=Kubernetes systemd probe", "--scope", "true") output, err := cmd.CombinedOutput() if err != nil { - glog.V(2).Infof("Cannot run systemd-run, assuming non-systemd OS") - glog.V(4).Infof("systemd-run failed with: %v", err) - glog.V(4).Infof("systemd-run output: %s", string(output)) + klog.V(2).Infof("Cannot run systemd-run, assuming non-systemd OS") + klog.V(4).Infof("systemd-run failed with: %v", err) + klog.V(4).Infof("systemd-run output: %s", string(output)) return false } - glog.V(2).Infof("Detected OS with systemd") + klog.V(2).Infof("Detected OS with systemd") return true } @@ -208,7 +209,7 @@ func addSystemdScope(systemdRunPath, mountName, command string, args []string) ( // Unmount unmounts the target. func (mounter *Mounter) Unmount(target string) error { - glog.V(4).Infof("Unmounting %s", target) + klog.V(4).Infof("Unmounting %s", target) command := exec.Command("umount", target) output, err := command.CombinedOutput() if err != nil { @@ -290,7 +291,7 @@ func exclusiveOpenFailsOnDevice(pathname string) (bool, error) { } if !isDevice { - glog.Errorf("Path %q is not referring to a device.", pathname) + klog.Errorf("Path %q is not referring to a device.", pathname) return false, nil } fd, errno := unix.Open(pathname, unix.O_RDONLY|unix.O_EXCL, 0) @@ -319,11 +320,11 @@ func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (str func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { refs, err := mounter.GetMountRefs(mountPath) if err != nil { - glog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) + klog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) return "", err } if len(refs) == 0 { - glog.V(4).Infof("Directory %s is not mounted", mountPath) + klog.V(4).Infof("Directory %s is not mounted", mountPath) return "", fmt.Errorf("directory %s is not mounted", mountPath) } basemountPath := path.Join(pluginDir, MountsInGlobalPDPath) @@ -331,7 +332,7 @@ func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (str if strings.HasPrefix(ref, basemountPath) { volumeID, err := filepath.Rel(basemountPath, ref) if err != nil { - glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) + klog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) return "", err } return volumeID, nil @@ -437,26 +438,26 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, if !readOnly { // Run fsck on the disk to fix repairable issues, only do this for volumes requested as rw. - glog.V(4).Infof("Checking for issues with fsck on disk: %s", source) + klog.V(4).Infof("Checking for issues with fsck on disk: %s", source) args := []string{"-a", source} out, err := mounter.Exec.Run("fsck", args...) 
if err != nil { ee, isExitError := err.(utilexec.ExitError) switch { case err == utilexec.ErrExecutableNotFound: - glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.") + klog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.") case isExitError && ee.ExitStatus() == fsckErrorsCorrected: - glog.Infof("Device %s has errors which were corrected by fsck.", source) + klog.Infof("Device %s has errors which were corrected by fsck.", source) case isExitError && ee.ExitStatus() == fsckErrorsUncorrected: return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out)) case isExitError && ee.ExitStatus() > fsckErrorsUncorrected: - glog.Infof("`fsck` error %s", string(out)) + klog.Infof("`fsck` error %s", string(out)) } } } // Try to mount the disk - glog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target) + klog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target) mountErr := mounter.Interface.Mount(source, target, fstype, options) if mountErr != nil { // Mount failed. This indicates either that the disk is unformatted or @@ -485,14 +486,14 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, source, } } - glog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args) + klog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args) _, err := mounter.Exec.Run("mkfs."+fstype, args...) if err == nil { // the disk has been formatted successfully try to mount it again. - glog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target) + klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target) return mounter.Interface.Mount(source, target, fstype, options) } - glog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err) + klog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err) return err } else { // Disk is already formatted and failed to mount @@ -511,10 +512,10 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, // GetDiskFormat uses 'blkid' to see if the given disk is unformated func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk} - glog.V(4).Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args) + klog.V(4).Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args) dataOut, err := mounter.Exec.Run("blkid", args...) 
output := string(dataOut) - glog.V(4).Infof("Output: %q, err: %v", output, err) + klog.V(4).Infof("Output: %q, err: %v", output, err) if err != nil { if exit, ok := err.(utilexec.ExitError); ok { @@ -526,7 +527,7 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { return "", nil } } - glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err) + klog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err) return "", err } @@ -552,7 +553,7 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { } if len(pttype) > 0 { - glog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype) + klog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype) // Returns a special non-empty string as filesystem type, then kubelet // will not format it. return "unknown data, probably partitions", nil @@ -686,11 +687,11 @@ func doMakeRShared(path string, mountInfoFilename string) error { return err } if shared { - glog.V(4).Infof("Directory %s is already on a shared mount", path) + klog.V(4).Infof("Directory %s is already on a shared mount", path) return nil } - glog.V(2).Infof("Bind-mounting %q with shared mount propagation", path) + klog.V(2).Infof("Bind-mounting %q with shared mount propagation", path) // mount --bind /var/lib/kubelet /var/lib/kubelet if err := syscall.Mount(path, path, "" /*fstype*/, syscall.MS_BIND, "" /*data*/); err != nil { return fmt.Errorf("failed to bind-mount %s: %v", path, err) @@ -766,7 +767,7 @@ func prepareSubpathTarget(mounter Interface, subpath Subpath) (bool, string, err } if !notMount { // It's already mounted - glog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", bindPathTarget) + klog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", bindPathTarget) return true, bindPathTarget, nil } @@ -819,7 +820,7 @@ func doBindSubPath(mounter Interface, subpath Subpath) (hostPath string, err err if err != nil { return "", fmt.Errorf("error resolving symlinks in %q: %v", subpath.Path, err) } - glog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, newPath, subpath.VolumePath) + klog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, newPath, subpath.VolumePath) subpath.VolumePath = newVolumePath subpath.Path = newPath @@ -841,9 +842,9 @@ func doBindSubPath(mounter Interface, subpath Subpath) (hostPath string, err err defer func() { // Cleanup subpath on error if !success { - glog.V(4).Infof("doBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) + klog.V(4).Infof("doBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) if cleanErr := cleanSubPath(mounter, subpath); cleanErr != nil { - glog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) + klog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) } } }() @@ -853,13 +854,13 @@ func doBindSubPath(mounter Interface, subpath Subpath) (hostPath string, err err // Do the bind mount options := []string{"bind"} - glog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget) + klog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget) if err = mounter.Mount(mountSource, bindPathTarget, "" /*fstype*/, options); err != nil { return "", fmt.Errorf("error mounting %s: %s", subpath.Path, err) } success = true - glog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) + klog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) return bindPathTarget, nil } @@ 
-871,7 +872,7 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error { func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error { // scan /var/lib/kubelet/pods//volume-subpaths//* subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName) - glog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir) + klog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir) containerDirs, err := ioutil.ReadDir(subPathDir) if err != nil { @@ -883,64 +884,61 @@ func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error for _, containerDir := range containerDirs { if !containerDir.IsDir() { - glog.V(4).Infof("Container file is not a directory: %s", containerDir.Name()) + klog.V(4).Infof("Container file is not a directory: %s", containerDir.Name()) continue } - glog.V(4).Infof("Cleaning up subpath mounts for container %s", containerDir.Name()) + klog.V(4).Infof("Cleaning up subpath mounts for container %s", containerDir.Name()) // scan /var/lib/kubelet/pods//volume-subpaths///* fullContainerDirPath := filepath.Join(subPathDir, containerDir.Name()) - subPaths, err := ioutil.ReadDir(fullContainerDirPath) - if err != nil { - return fmt.Errorf("error reading %s: %s", fullContainerDirPath, err) - } - for _, subPath := range subPaths { - if err = doCleanSubPath(mounter, fullContainerDirPath, subPath.Name()); err != nil { + err = filepath.Walk(fullContainerDirPath, func(path string, info os.FileInfo, err error) error { + if path == fullContainerDirPath { + // Skip top level directory + return nil + } + + // pass through errors and let doCleanSubPath handle them + if err = doCleanSubPath(mounter, fullContainerDirPath, filepath.Base(path)); err != nil { return err } + return nil + }) + if err != nil { + return fmt.Errorf("error processing %s: %s", fullContainerDirPath, err) } + // Whole container has been processed, remove its directory. if err := os.Remove(fullContainerDirPath); err != nil { return fmt.Errorf("error deleting %s: %s", fullContainerDirPath, err) } - glog.V(5).Infof("Removed %s", fullContainerDirPath) + klog.V(5).Infof("Removed %s", fullContainerDirPath) } // Whole pod volume subpaths have been cleaned up, remove its subpath directory. 
if err := os.Remove(subPathDir); err != nil { return fmt.Errorf("error deleting %s: %s", subPathDir, err) } - glog.V(5).Infof("Removed %s", subPathDir) + klog.V(5).Infof("Removed %s", subPathDir) // Remove entire subpath directory if it's the last one podSubPathDir := filepath.Join(podDir, containerSubPathDirectoryName) if err := os.Remove(podSubPathDir); err != nil && !os.IsExist(err) { return fmt.Errorf("error deleting %s: %s", podSubPathDir, err) } - glog.V(5).Infof("Removed %s", podSubPathDir) + klog.V(5).Infof("Removed %s", podSubPathDir) return nil } // doCleanSubPath tears down the single subpath bind mount func doCleanSubPath(mounter Interface, fullContainerDirPath, subPathIndex string) error { // process /var/lib/kubelet/pods//volume-subpaths/// - glog.V(4).Infof("Cleaning up subpath mounts for subpath %v", subPathIndex) + klog.V(4).Infof("Cleaning up subpath mounts for subpath %v", subPathIndex) fullSubPath := filepath.Join(fullContainerDirPath, subPathIndex) - notMnt, err := IsNotMountPoint(mounter, fullSubPath) - if err != nil { - return fmt.Errorf("error checking %s for mount: %s", fullSubPath, err) - } - // Unmount it - if !notMnt { - if err = mounter.Unmount(fullSubPath); err != nil { - return fmt.Errorf("error unmounting %s: %s", fullSubPath, err) - } - glog.V(5).Infof("Unmounted %s", fullSubPath) - } - // Remove it *non*-recursively, just in case there were some hiccups. - if err = os.Remove(fullSubPath); err != nil { - return fmt.Errorf("error deleting %s: %s", fullSubPath, err) + + if err := CleanupMountPoint(fullSubPath, mounter, true); err != nil { + return fmt.Errorf("error cleaning subpath mount %s: %s", fullSubPath, err) } - glog.V(5).Infof("Removed %s", fullSubPath) + + klog.V(4).Infof("Successfully cleaned subpath directory %s", fullSubPath) return nil } @@ -972,7 +970,7 @@ func removeEmptyDirs(baseDir, endDir string) error { s, err := os.Stat(curDir) if err != nil { if os.IsNotExist(err) { - glog.V(5).Infof("curDir %q doesn't exist, skipping", curDir) + klog.V(5).Infof("curDir %q doesn't exist, skipping", curDir) continue } return fmt.Errorf("error stat %q: %v", curDir, err) @@ -983,12 +981,12 @@ func removeEmptyDirs(baseDir, endDir string) error { err = os.Remove(curDir) if os.IsExist(err) { - glog.V(5).Infof("Directory %q not empty, not removing", curDir) + klog.V(5).Infof("Directory %q not empty, not removing", curDir) break } else if err != nil { return fmt.Errorf("error removing directory %q: %v", curDir, err) } - glog.V(5).Infof("Removed directory %q", curDir) + klog.V(5).Infof("Removed directory %q", curDir) } return nil } @@ -1005,10 +1003,14 @@ func (mounter *Mounter) SafeMakeDir(subdir string, base string, perm os.FileMode } func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { - if _, err := os.Stat(pathname); os.IsNotExist(err) { + pathExists, pathErr := PathExists(pathname) + if !pathExists { return []string{}, nil - } else if err != nil { - return nil, err + } else if IsCorruptedMnt(pathErr) { + klog.Warningf("GetMountRefs found corrupted mount at %s, treating as unmounted path", pathname) + return []string{}, nil + } else if pathErr != nil { + return nil, fmt.Errorf("error checking path %s: %v", pathname, pathErr) } realpath, err := filepath.EvalSymlinks(pathname) if err != nil { @@ -1055,7 +1057,7 @@ func getMode(pathname string) (os.FileMode, error) { // and base must be either already resolved symlinks or thet will be resolved in // kubelet's mount namespace (in case it runs containerized). 
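doSafeMakeDir, whose context appears in the next hunk, refuses to create anything that would escape the allowed base before it walks path components with O_NOFOLLOW opens. A self-contained sketch of that containment check; pathWithinBase here is a local reimplementation for illustration only, not the package's PathWithinBase helper:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// pathWithinBase reports whether fullPath stays inside base once both are
// cleaned, which is the guard applied before any directory is created.
func pathWithinBase(fullPath, base string) bool {
	rel, err := filepath.Rel(base, fullPath)
	if err != nil {
		return false
	}
	return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator))
}

func main() {
	base := "/var/lib/kubelet/pods/uid/volumes/kubernetes.io~empty-dir/vol"

	fmt.Println(pathWithinBase(filepath.Join(base, "sub/dir"), base))   // true
	fmt.Println(pathWithinBase("/etc/passwd", base))                    // false
	fmt.Println(pathWithinBase(filepath.Join(base, "../escape"), base)) // false
}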
func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { - glog.V(4).Infof("Creating directory %q within base %q", pathname, base) + klog.V(4).Infof("Creating directory %q within base %q", pathname, base) if !PathWithinBase(pathname, base) { return fmt.Errorf("path %s is outside of allowed base %s", pathname, base) @@ -1068,7 +1070,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { if s.IsDir() { // The directory already exists. It can be outside of the parent, // but there is no race-proof check. - glog.V(4).Infof("Directory %s already exists", pathname) + klog.V(4).Infof("Directory %s already exists", pathname) return nil } return &os.PathError{Op: "mkdir", Path: pathname, Err: syscall.ENOTDIR} @@ -1088,7 +1090,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { return fmt.Errorf("path %s is outside of allowed base %s", fullExistingPath, err) } - glog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...)) + klog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...)) parentFD, err := doSafeOpen(fullExistingPath, base) if err != nil { return fmt.Errorf("cannot open directory %s: %s", existingPath, err) @@ -1097,12 +1099,12 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { defer func() { if parentFD != -1 { if err = syscall.Close(parentFD); err != nil { - glog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err) + klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err) } } if childFD != -1 { if err = syscall.Close(childFD); err != nil { - glog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", childFD, pathname, err) + klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", childFD, pathname, err) } } }() @@ -1112,7 +1114,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { // created directory into symlink. for _, dir := range toCreate { currentPath = filepath.Join(currentPath, dir) - glog.V(4).Infof("Creating %s", dir) + klog.V(4).Infof("Creating %s", dir) err = syscall.Mkdirat(parentFD, currentPath, uint32(perm)) if err != nil { return fmt.Errorf("cannot create directory %s: %s", currentPath, err) @@ -1131,7 +1133,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { // and user either gets error or the file that it can already access. 
if err = syscall.Close(parentFD); err != nil { - glog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err) + klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err) } parentFD = childFD childFD = -1 @@ -1180,7 +1182,7 @@ func findExistingPrefix(base, pathname string) (string, []string, error) { } defer func() { if err = syscall.Close(fd); err != nil { - glog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err) + klog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err) } }() for i, dir := range dirs { @@ -1194,7 +1196,7 @@ func findExistingPrefix(base, pathname string) (string, []string, error) { return base, nil, err } if err = syscall.Close(fd); err != nil { - glog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err) + klog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err) } fd = childFD currentPath = filepath.Join(currentPath, dir) @@ -1226,7 +1228,7 @@ func doSafeOpen(pathname string, base string) (int, error) { defer func() { if parentFD != -1 { if err = syscall.Close(parentFD); err != nil { - glog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", parentFD, pathname, err) + klog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", parentFD, pathname, err) } } }() @@ -1235,7 +1237,7 @@ func doSafeOpen(pathname string, base string) (int, error) { defer func() { if childFD != -1 { if err = syscall.Close(childFD); err != nil { - glog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", childFD, pathname, err) + klog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", childFD, pathname, err) } } }() @@ -1250,7 +1252,7 @@ func doSafeOpen(pathname string, base string) (int, error) { return -1, fmt.Errorf("path %s is outside of allowed base %s", currentPath, base) } - glog.V(5).Infof("Opening path %s", currentPath) + klog.V(5).Infof("Opening path %s", currentPath) childFD, err = syscall.Openat(parentFD, seg, openFDFlags, 0) if err != nil { return -1, fmt.Errorf("cannot open %s: %s", currentPath, err) diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go index 7f6281b8888be..0930330c16186 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go @@ -28,7 +28,7 @@ import ( "strings" "syscall" - "github.com/golang/glog" + "k8s.io/klog" utilfile "k8s.io/kubernetes/pkg/util/file" ) @@ -49,12 +49,13 @@ func New(mounterPath string) Interface { } } -// Mount : mounts source to target as NTFS with given options. +// Mount : mounts source to target with given options. 
+// currently only supports cifs(smb), bind mount(for disk) func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error { target = normalizeWindowsPath(target) if source == "tmpfs" { - glog.V(3).Infof("azureMount: mounting source (%q), target (%q), with options (%q)", source, target, options) + klog.V(3).Infof("mounting source (%q), target (%q), with options (%q)", source, target, options) return os.MkdirAll(target, 0755) } @@ -63,58 +64,93 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio return err } - glog.V(4).Infof("azureMount: mount options(%q) source:%q, target:%q, fstype:%q, begin to mount", + klog.V(4).Infof("mount options(%q) source:%q, target:%q, fstype:%q, begin to mount", options, source, target, fstype) - bindSource := "" + bindSource := source // tell it's going to mount azure disk or azure file according to options - if bind, _ := isBind(options); bind { + if bind, _, _ := isBind(options); bind { // mount azure disk bindSource = normalizeWindowsPath(source) } else { if len(options) < 2 { - glog.Warningf("azureMount: mount options(%q) command number(%d) less than 2, source:%q, target:%q, skip mounting", + klog.Warningf("mount options(%q) command number(%d) less than 2, source:%q, target:%q, skip mounting", options, len(options), source, target) return nil } // currently only cifs mount is supported if strings.ToLower(fstype) != "cifs" { - return fmt.Errorf("azureMount: only cifs mount is supported now, fstype: %q, mounting source (%q), target (%q), with options (%q)", fstype, source, target, options) + return fmt.Errorf("only cifs mount is supported now, fstype: %q, mounting source (%q), target (%q), with options (%q)", fstype, source, target, options) } - bindSource = source - - // use PowerShell Environment Variables to store user input string to prevent command line injection - // https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_environment_variables?view=powershell-5.1 - cmdLine := fmt.Sprintf(`$PWord = ConvertTo-SecureString -String $Env:smbpassword -AsPlainText -Force` + - `;$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $Env:smbuser, $PWord` + - `;New-SmbGlobalMapping -RemotePath $Env:smbremotepath -Credential $Credential`) - - cmd := exec.Command("powershell", "/c", cmdLine) - cmd.Env = append(os.Environ(), - fmt.Sprintf("smbuser=%s", options[0]), - fmt.Sprintf("smbpassword=%s", options[1]), - fmt.Sprintf("smbremotepath=%s", source)) - if output, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("azureMount: SmbGlobalMapping failed: %v, only SMB mount is supported now, output: %q", err, string(output)) + if output, err := newSMBMapping(options[0], options[1], source); err != nil { + if isSMBMappingExist(source) { + klog.V(2).Infof("SMB Mapping(%s) already exists, now begin to remove and remount", source) + if output, err := removeSMBMapping(source); err != nil { + return fmt.Errorf("Remove-SmbGlobalMapping failed: %v, output: %q", err, output) + } + if output, err := newSMBMapping(options[0], options[1], source); err != nil { + return fmt.Errorf("New-SmbGlobalMapping remount failed: %v, output: %q", err, output) + } + } else { + return fmt.Errorf("New-SmbGlobalMapping failed: %v, output: %q", err, output) + } } } if output, err := exec.Command("cmd", "/c", "mklink", "/D", target, bindSource).CombinedOutput(); err != nil { - glog.Errorf("mklink failed: %v, source(%q) target(%q) output: %q", err, 
bindSource, target, string(output)) + klog.Errorf("mklink failed: %v, source(%q) target(%q) output: %q", err, bindSource, target, string(output)) return err } return nil } +// do the SMB mount with username, password, remotepath +// return (output, error) +func newSMBMapping(username, password, remotepath string) (string, error) { + if username == "" || password == "" || remotepath == "" { + return "", fmt.Errorf("invalid parameter(username: %s, password: %s, remoteapth: %s)", username, password, remotepath) + } + + // use PowerShell Environment Variables to store user input string to prevent command line injection + // https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_environment_variables?view=powershell-5.1 + cmdLine := `$PWord = ConvertTo-SecureString -String $Env:smbpassword -AsPlainText -Force` + + `;$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $Env:smbuser, $PWord` + + `;New-SmbGlobalMapping -RemotePath $Env:smbremotepath -Credential $Credential` + cmd := exec.Command("powershell", "/c", cmdLine) + cmd.Env = append(os.Environ(), + fmt.Sprintf("smbuser=%s", username), + fmt.Sprintf("smbpassword=%s", password), + fmt.Sprintf("smbremotepath=%s", remotepath)) + + output, err := cmd.CombinedOutput() + return string(output), err +} + +// check whether remotepath is already mounted +func isSMBMappingExist(remotepath string) bool { + cmd := exec.Command("powershell", "/c", `Get-SmbGlobalMapping -RemotePath $Env:smbremotepath`) + cmd.Env = append(os.Environ(), fmt.Sprintf("smbremotepath=%s", remotepath)) + _, err := cmd.CombinedOutput() + return err == nil +} + +// remove SMB mapping +func removeSMBMapping(remotepath string) (string, error) { + cmd := exec.Command("powershell", "/c", `Remove-SmbGlobalMapping -RemotePath $Env:smbremotepath -Force`) + cmd.Env = append(os.Environ(), fmt.Sprintf("smbremotepath=%s", remotepath)) + output, err := cmd.CombinedOutput() + return string(output), err +} + // Unmount unmounts the target. 
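newSMBMapping above avoids command-line injection by passing the username, password and remote path to PowerShell through environment variables instead of interpolating them into the script. The same technique in isolation, as a hedged stdlib-only sketch (the helper name and signature are illustrative):

package smbsketch

import (
	"fmt"
	"os"
	"os/exec"
)

// runPowerShellWithEnv runs a PowerShell script that reads its inputs from
// environment variables, so untrusted values never appear on the command line.
func runPowerShellWithEnv(script string, env map[string]string) (string, error) {
	cmd := exec.Command("powershell", "/c", script)
	cmd.Env = os.Environ()
	for k, v := range env {
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
	}
	out, err := cmd.CombinedOutput()
	return string(out), err
}

Called with smbuser, smbpassword and smbremotepath entries and the New-SmbGlobalMapping script shown above, this mirrors the map/remap flow the patch introduces.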
func (mounter *Mounter) Unmount(target string) error { - glog.V(4).Infof("azureMount: Unmount target (%q)", target) + klog.V(4).Infof("azureMount: Unmount target (%q)", target) target = normalizeWindowsPath(target) if output, err := exec.Command("cmd", "/c", "rmdir", target).CombinedOutput(); err != nil { - glog.Errorf("rmdir failed: %v, output: %q", err, string(output)) + klog.Errorf("rmdir failed: %v, output: %q", err, string(output)) return err } return nil @@ -168,7 +204,7 @@ func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (str func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { refs, err := mounter.GetMountRefs(mountPath) if err != nil { - glog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) + klog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) return "", err } if len(refs) == 0 { @@ -179,7 +215,7 @@ func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (str if strings.Contains(ref, basemountPath) { volumeID, err := filepath.Rel(normalizeWindowsPath(basemountPath), ref) if err != nil { - glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) + klog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) return "", err } return volumeID, nil @@ -362,10 +398,10 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error { func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error { // Try to mount the disk - glog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, source, target) + klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, source, target) if err := ValidateDiskNumber(source); err != nil { - glog.Errorf("diskMount: formatAndMount failed, err: %v", err) + klog.Errorf("diskMount: formatAndMount failed, err: %v", err) return err } @@ -380,7 +416,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, if output, err := mounter.Exec.Run("powershell", "/c", cmd); err != nil { return fmt.Errorf("diskMount: format disk failed, error: %v, output: %q", err, string(output)) } - glog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype) + klog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype) driveLetter, err := getDriveLetterByDiskNumber(source, mounter.Exec) if err != nil { @@ -388,9 +424,9 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, } driverPath := driveLetter + ":" target = normalizeWindowsPath(target) - glog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, driverPath, target) + klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, driverPath, target) if output, err := mounter.Exec.Run("cmd", "/c", "mklink", "/D", target, driverPath); err != nil { - glog.Errorf("mklink failed: %v, output: %q", err, string(output)) + klog.Errorf("mklink failed: %v, output: %q", err, string(output)) return err } return nil @@ -460,10 +496,14 @@ func getAllParentLinks(path string) ([]string, error) { // GetMountRefs : empty implementation here since there is no place to query all mount points on Windows func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { - if _, err := os.Stat(normalizeWindowsPath(pathname)); os.IsNotExist(err) { + pathExists, pathErr := PathExists(normalizeWindowsPath(pathname)) + // TODO(#75012): Need a 
Windows specific IsCorruptedMnt function that checks against whatever errno's + // Windows emits when we try to Stat a corrupted mount + // https://golang.org/pkg/syscall/?GOOS=windows&GOARCH=amd64#Errno + if !pathExists { return []string{}, nil - } else if err != nil { - return nil, err + } else if pathErr != nil { + return nil, fmt.Errorf("error checking path %s: %v", normalizeWindowsPath(pathname), pathErr) } return []string{pathname}, nil } @@ -499,7 +539,7 @@ func (mounter *Mounter) SafeMakeDir(subdir string, base string, perm os.FileMode } func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { - glog.V(4).Infof("Creating directory %q within base %q", pathname, base) + klog.V(4).Infof("Creating directory %q within base %q", pathname, base) if !PathWithinBase(pathname, base) { return fmt.Errorf("path %s is outside of allowed base %s", pathname, base) @@ -512,7 +552,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { if s.IsDir() { // The directory already exists. It can be outside of the parent, // but there is no race-proof check. - glog.V(4).Infof("Directory %s already exists", pathname) + klog.V(4).Infof("Directory %s already exists", pathname) return nil } return &os.PathError{Op: "mkdir", Path: pathname, Err: syscall.ENOTDIR} @@ -547,13 +587,13 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { return err } - glog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...)) + klog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...)) currentPath := fullExistingPath // create the directories one by one, making sure nobody can change // created directory into symlink by lock that directory immediately for _, dir := range toCreate { currentPath = filepath.Join(currentPath, dir) - glog.V(4).Infof("Creating %s", dir) + klog.V(4).Infof("Creating %s", dir) if err := os.Mkdir(currentPath, perm); err != nil { return fmt.Errorf("cannot create directory %s: %s", currentPath, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go index d627a8f1fe448..5f013bb7ceca4 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go @@ -25,8 +25,8 @@ import ( "strings" "syscall" - "github.com/golang/glog" "golang.org/x/sys/unix" + "k8s.io/klog" utilfile "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/nsenter" ) @@ -61,10 +61,10 @@ var _ = Interface(&NsenterMounter{}) // Mount runs mount(8) in the host's root mount namespace. Aside from this // aspect, Mount has the same semantics as the mounter returned by mount.New() func (n *NsenterMounter) Mount(source string, target string, fstype string, options []string) error { - bind, bindRemountOpts := isBind(options) + bind, bindOpts, bindRemountOpts := isBind(options) if bind { - err := n.doNsenterMount(source, target, fstype, []string{"bind"}) + err := n.doNsenterMount(source, target, fstype, bindOpts) if err != nil { return err } @@ -77,11 +77,11 @@ func (n *NsenterMounter) Mount(source string, target string, fstype string, opti // doNsenterMount nsenters the host's mount namespace and performs the // requested mount. 
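The nsenter mounter now consumes three return values from isBind: whether a bind mount was requested, the options for the initial bind, and the options for the follow-up remount. isBind itself is unexported; the following is only a hedged re-implementation of the split implied by the call sites above, and the exact option handling upstream may differ:

package bindsketch

// splitBindOptions reports whether options request a bind mount and, if so,
// returns the options for the initial bind and for the remount that applies
// the remaining flags (for example "ro").
func splitBindOptions(options []string) (bind bool, bindOpts, bindRemountOpts []string) {
	rest := make([]string, 0, len(options))
	for _, o := range options {
		switch o {
		case "bind":
			bind = true
		case "remount":
			// re-added explicitly below
		default:
			rest = append(rest, o)
		}
	}
	if !bind {
		return false, nil, nil
	}
	bindOpts = []string{"bind"}
	bindRemountOpts = append(rest, "bind", "remount")
	return true, bindOpts, bindRemountOpts
}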
func (n *NsenterMounter) doNsenterMount(source, target, fstype string, options []string) error { - glog.V(5).Infof("nsenter mount %s %s %s %v", source, target, fstype, options) + klog.V(5).Infof("nsenter mount %s %s %s %v", source, target, fstype, options) cmd, args := n.makeNsenterArgs(source, target, fstype, options) outputBytes, err := n.ne.Exec(cmd, args).CombinedOutput() if len(outputBytes) != 0 { - glog.V(5).Infof("Output of mounting %s to %s: %v", source, target, string(outputBytes)) + klog.V(5).Infof("Output of mounting %s to %s: %v", source, target, string(outputBytes)) } return err } @@ -131,10 +131,10 @@ func (n *NsenterMounter) Unmount(target string) error { // No need to execute systemd-run here, it's enough that unmount is executed // in the host's mount namespace. It will finish appropriate fuse daemon(s) // running in any scope. - glog.V(5).Infof("nsenter unmount args: %v", args) + klog.V(5).Infof("nsenter unmount args: %v", args) outputBytes, err := n.ne.Exec("umount", args).CombinedOutput() if len(outputBytes) != 0 { - glog.V(5).Infof("Output of unmounting %s: %v", target, string(outputBytes)) + klog.V(5).Infof("Output of unmounting %s: %v", target, string(outputBytes)) } return err } @@ -163,18 +163,25 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { // Check the directory exists if _, err = os.Stat(file); os.IsNotExist(err) { - glog.V(5).Infof("findmnt: directory %s does not exist", file) + klog.V(5).Infof("findmnt: directory %s does not exist", file) return true, err } + + // Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts + resolvedFile, err := n.EvalHostSymlinks(file) + if err != nil { + return true, err + } + // Add --first-only option: since we are testing for the absence of a mountpoint, it is sufficient to get only // the first of multiple possible mountpoints using --first-only. // Also add fstype output to make sure that the output of target file will give the full path // TODO: Need more refactoring for this function. Track the solution with issue #26996 - args := []string{"-o", "target,fstype", "--noheadings", "--first-only", "--target", file} - glog.V(5).Infof("nsenter findmnt args: %v", args) + args := []string{"-o", "target,fstype", "--noheadings", "--first-only", "--target", resolvedFile} + klog.V(5).Infof("nsenter findmnt args: %v", args) out, err := n.ne.Exec("findmnt", args).CombinedOutput() if err != nil { - glog.V(2).Infof("Failed findmnt command for path %s: %s %v", file, out, err) + klog.V(2).Infof("Failed findmnt command for path %s: %s %v", resolvedFile, out, err) // Different operating systems behave differently for paths which are not mount points. // On older versions (e.g. 2.20.1) we'd get error, on newer ones (e.g. 2.26.2) we'd get "/". // It's safer to assume that it's not a mount point. 
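doNsenterMount above builds an nsenter invocation that re-enters the host's mount namespace before running mount(8). Stripped of the Nsenter helper, the generated command looks roughly like the sketch below; the /rootfs prefix for the host filesystem and the /bin/mount path are assumptions for illustration:

package nsentersketch

import (
	"os/exec"
	"strings"
)

// hostMount runs mount(8) inside the host's mount namespace via nsenter.
func hostMount(source, target, fstype string, options []string) ([]byte, error) {
	args := []string{"--mount=/rootfs/proc/1/ns/mnt", "--", "/bin/mount"}
	if fstype != "" {
		args = append(args, "-t", fstype)
	}
	if len(options) > 0 {
		args = append(args, "-o", strings.Join(options, ","))
	}
	args = append(args, source, target)
	return exec.Command("nsenter", args...).CombinedOutput()
}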
@@ -185,13 +192,13 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { return false, err } - glog.V(5).Infof("IsLikelyNotMountPoint findmnt output for path %s: %v:", file, mountTarget) + klog.V(5).Infof("IsLikelyNotMountPoint findmnt output for path %s: %v:", resolvedFile, mountTarget) - if mountTarget == file { - glog.V(5).Infof("IsLikelyNotMountPoint: %s is a mount point", file) + if mountTarget == resolvedFile { + klog.V(5).Infof("IsLikelyNotMountPoint: %s is a mount point", resolvedFile) return false, nil } - glog.V(5).Infof("IsLikelyNotMountPoint: %s is not a mount point", file) + klog.V(5).Infof("IsLikelyNotMountPoint: %s is not a mount point", resolvedFile) return true, nil } @@ -337,12 +344,11 @@ func (mounter *NsenterMounter) SafeMakeDir(subdir string, base string, perm os.F } func (mounter *NsenterMounter) GetMountRefs(pathname string) ([]string, error) { - exists, err := mounter.ExistsPath(pathname) - if err != nil { - return nil, err - } - if !exists { + pathExists, pathErr := PathExists(pathname) + if !pathExists || IsCorruptedMnt(pathErr) { return []string{}, nil + } else if pathErr != nil { + return nil, fmt.Errorf("Error checking path %s: %v", pathname, pathErr) } hostpath, err := mounter.ne.EvalSymlinks(pathname, true /* mustExist */) if err != nil { @@ -368,7 +374,7 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st if err != nil { return "", fmt.Errorf("error resolving symlinks in %q: %v", subpath.Path, err) } - glog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, evaluatedHostSubpath, subpath.VolumePath) + klog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, evaluatedHostSubpath, subpath.VolumePath) subpath.VolumePath = mounter.ne.KubeletPath(evaluatedHostVolumePath) subpath.Path = mounter.ne.KubeletPath(evaluatedHostSubpath) @@ -391,9 +397,9 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st defer func() { // Cleanup subpath on error if !success { - glog.V(4).Infof("doNsEnterBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) + klog.V(4).Infof("doNsEnterBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) if cleanErr := cleanSubPath(mounter, subpath); cleanErr != nil { - glog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) + klog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) } } }() @@ -401,7 +407,7 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st // Leap of faith: optimistically expect that nobody has modified previously // expanded evalSubPath with evil symlinks and bind-mount it. // Mount is done on the host! don't use kubelet path! 
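IsLikelyNotMountPoint above now resolves symlinks before calling findmnt, because the kernel records the resolved path in /proc/mounts. A minimal sketch of that check outside the nsenter machinery, assuming findmnt is on the PATH (isNotMountPoint is an illustrative stand-alone helper):

package findmntsketch

import (
	"os/exec"
	"path/filepath"
	"strings"
)

// isNotMountPoint resolves symlinks in file and asks findmnt whether the
// resolved path is itself a mount target. findmnt errors are treated as
// "not a mount point", matching the conservative behaviour above.
func isNotMountPoint(file string) (bool, error) {
	resolved, err := filepath.EvalSymlinks(file)
	if err != nil {
		return true, err
	}
	out, err := exec.Command("findmnt",
		"-o", "target", "--noheadings", "--first-only", "--target", resolved).CombinedOutput()
	if err != nil {
		return true, nil
	}
	target := strings.TrimSpace(string(out))
	return target != resolved, nil
}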
- glog.V(5).Infof("bind mounting %q at %q", evaluatedHostSubpath, bindPathTarget) + klog.V(5).Infof("bind mounting %q at %q", evaluatedHostSubpath, bindPathTarget) if err = mounter.Mount(evaluatedHostSubpath, bindPathTarget, "" /*fstype*/, []string{"bind"}); err != nil { return "", fmt.Errorf("error mounting %s: %s", evaluatedHostSubpath, err) } @@ -414,7 +420,7 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st } success = true - glog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) + klog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) return bindPathTarget, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/util/node/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/util/node/BUILD.bazel index d3400af75e7bd..9a4f70ea86b1e 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/node/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/util/node/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubelet/apis:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/node/node.go b/vendor/k8s.io/kubernetes/pkg/util/node/node.go index ddb29aac2b746..ff50385515076 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/node/node.go +++ b/vendor/k8s.io/kubernetes/pkg/util/node/node.go @@ -24,6 +24,8 @@ import ( "strings" "time" + "k8s.io/klog" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -34,9 +36,11 @@ import ( ) const ( - // The reason and message set on a pod when its state cannot be confirmed as kubelet is unresponsive + // NodeUnreachablePodReason is the reason on a pod when its state cannot be confirmed as kubelet is unresponsive + // on the node it is (was) running. + NodeUnreachablePodReason = "NodeLost" + // NodeUnreachablePodMessage is the message on a pod when its state cannot be confirmed as kubelet is unresponsive // on the node it is (was) running. - NodeUnreachablePodReason = "NodeLost" NodeUnreachablePodMessage = "Node %v which was running pod %v is unresponsive" ) @@ -91,6 +95,22 @@ func GetNodeHostIP(node *v1.Node) (net.IP, error) { return nil, fmt.Errorf("host IP unknown; known addresses: %v", addresses) } +// GetNodeIP returns the ip of node with the provided hostname +func GetNodeIP(client clientset.Interface, hostname string) net.IP { + var nodeIP net.IP + node, err := client.CoreV1().Nodes().Get(hostname, metav1.GetOptions{}) + if err != nil { + klog.Warningf("Failed to retrieve node info: %v", err) + return nil + } + nodeIP, err = GetNodeHostIP(node) + if err != nil { + klog.Warningf("Failed to retrieve node IP: %v", err) + return nil + } + return nodeIP +} + // GetZoneKey is a helper function that builds a string identifier that is unique per failure-zone; // it returns empty-string for no zone. 
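The new GetNodeIP helper wraps the node Get call and GetNodeHostIP, returning nil (after logging a warning) on any failure. A short usage sketch; building the clientset from an in-cluster config and the node name are assumptions for illustration:

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	nodeutil "k8s.io/kubernetes/pkg/util/node"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	// GetNodeIP returns nil if the node lookup or address resolution fails.
	if ip := nodeutil.GetNodeIP(client, "my-node"); ip != nil {
		fmt.Printf("node IP: %s\n", ip)
	}
}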
func GetZoneKey(node *v1.Node) string { diff --git a/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD.bazel index e66724e488ecf..bc81b41237f7f 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD.bazel @@ -25,7 +25,7 @@ go_library( "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ diff --git a/vendor/k8s.io/kubernetes/pkg/util/nsenter/exec.go b/vendor/k8s.io/kubernetes/pkg/util/nsenter/exec.go index 201f1270c7725..134497f0a752a 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/nsenter/exec.go +++ b/vendor/k8s.io/kubernetes/pkg/util/nsenter/exec.go @@ -23,7 +23,7 @@ import ( "fmt" "path/filepath" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/utils/exec" ) @@ -49,7 +49,7 @@ func NewNsenterExecutor(hostRootFsPath string, executor exec.Interface) *Executo func (nsExecutor *Executor) Command(cmd string, args ...string) exec.Cmd { fullArgs := append([]string{fmt.Sprintf("--mount=%s", nsExecutor.hostProcMountNsPath), "--"}, append([]string{cmd}, args...)...) - glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + klog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) return nsExecutor.executor.Command(nsenterPath, fullArgs...) } @@ -57,7 +57,7 @@ func (nsExecutor *Executor) Command(cmd string, args ...string) exec.Cmd { func (nsExecutor *Executor) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd { fullArgs := append([]string{fmt.Sprintf("--mount=%s", nsExecutor.hostProcMountNsPath), "--"}, append([]string{cmd}, args...)...) - glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + klog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) return nsExecutor.executor.CommandContext(ctx, nsenterPath, fullArgs...) } diff --git a/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go b/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go index e928a57ac9fe0..56361e7846e7e 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go +++ b/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go @@ -28,7 +28,7 @@ import ( "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -127,7 +127,7 @@ func (ne *Nsenter) Exec(cmd string, args []string) exec.Cmd { hostProcMountNsPath := filepath.Join(ne.hostRootFsPath, mountNsPath) fullArgs := append([]string{fmt.Sprintf("--mount=%s", hostProcMountNsPath), "--"}, append([]string{ne.AbsHostPath(cmd)}, args...)...) - glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + klog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) return ne.executor.Command(nsenterPath, fullArgs...) 
} @@ -170,7 +170,7 @@ func (ne *Nsenter) EvalSymlinks(pathname string, mustExist bool) (string, error) } outBytes, err := ne.Exec("realpath", args).CombinedOutput() if err != nil { - glog.Infof("failed to resolve symbolic links on %s: %v", pathname, err) + klog.Infof("failed to resolve symbolic links on %s: %v", pathname, err) return "", err } return strings.TrimSpace(string(outBytes)), nil diff --git a/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD.bazel new file mode 100644 index 0000000000000..f9ba1406f72f3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD.bazel @@ -0,0 +1,49 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "resizefs_linux.go", + "resizefs_unsupported.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/util/resizefs", + importpath = "k8s.io/kubernetes/pkg/util/resizefs", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go new file mode 100644 index 0000000000000..4eabdb1ddc0b1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go @@ -0,0 +1,86 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resizefs + +import ( + "fmt" + + "k8s.io/klog" + "k8s.io/kubernetes/pkg/util/mount" +) + +// ResizeFs Provides support for resizing file systems +type ResizeFs struct { + mounter *mount.SafeFormatAndMount +} + +// NewResizeFs returns new instance of resizer +func NewResizeFs(mounter *mount.SafeFormatAndMount) *ResizeFs { + return &ResizeFs{mounter: mounter} +} + +// Resize perform resize of file system +func (resizefs *ResizeFs) Resize(devicePath string, deviceMountPath string) (bool, error) { + format, err := resizefs.mounter.GetDiskFormat(devicePath) + + if err != nil { + formatErr := fmt.Errorf("ResizeFS.Resize - error checking format for device %s: %v", devicePath, err) + return false, formatErr + } + + // If disk has no format, there is no need to resize the disk because mkfs.* + // by default will use whole disk anyways. + if format == "" { + return false, nil + } + + klog.V(3).Infof("ResizeFS.Resize - Expanding mounted volume %s", devicePath) + switch format { + case "ext3", "ext4": + return resizefs.extResize(devicePath) + case "xfs": + return resizefs.xfsResize(deviceMountPath) + } + return false, fmt.Errorf("ResizeFS.Resize - resize of format %s is not supported for device %s mounted at %s", format, devicePath, deviceMountPath) +} + +func (resizefs *ResizeFs) extResize(devicePath string) (bool, error) { + output, err := resizefs.mounter.Exec.Run("resize2fs", devicePath) + if err == nil { + klog.V(2).Infof("Device %s resized successfully", devicePath) + return true, nil + } + + resizeError := fmt.Errorf("resize of device %s failed: %v. resize2fs output: %s", devicePath, err, string(output)) + return false, resizeError + +} + +func (resizefs *ResizeFs) xfsResize(deviceMountPath string) (bool, error) { + args := []string{"-d", deviceMountPath} + output, err := resizefs.mounter.Exec.Run("xfs_growfs", args...) + + if err == nil { + klog.V(2).Infof("Device %s resized successfully", deviceMountPath) + return true, nil + } + + resizeError := fmt.Errorf("resize of device %s failed: %v. xfs_growfs output: %s", deviceMountPath, err, string(output)) + return false, resizeError +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/error.go b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go similarity index 51% rename from vendor/k8s.io/kubernetes/pkg/kubectl/apply/error.go rename to vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go index 2e7a3d49976b2..dd4dd017e829e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply/error.go +++ b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go @@ -1,3 +1,5 @@ +// +build !linux + /* Copyright 2017 The Kubernetes Authors. @@ -14,24 +16,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -package apply +package resizefs + +import ( + "fmt" -import "fmt" + "k8s.io/kubernetes/pkg/util/mount" +) -// ConflictError represents a conflict error occurred during the merge operation. 
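resizefs_linux.go above shells out to resize2fs or xfs_growfs depending on the detected filesystem, and returns (false, nil) for an unformatted device. A usage sketch for the newly vendored package with placeholder device and mount paths; constructing SafeFormatAndMount from mount.New and mount.NewOsExec is an assumption based on the package's usual constructors:

package main

import (
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/util/resizefs"
)

func main() {
	m := &mount.SafeFormatAndMount{
		Interface: mount.New(""), // default mounter
		Exec:      mount.NewOsExec(),
	}
	resizer := resizefs.NewResizeFs(m)
	// Placeholder paths: the block device and its current mount point.
	resized, err := resizer.Resize("/dev/xvdf", "/mnt/data")
	if err != nil {
		klog.Fatalf("resize failed: %v", err)
	}
	klog.Infof("filesystem resized: %v", resized)
}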
-type ConflictError struct { - element Element +// ResizeFs Provides support for resizing file systems +type ResizeFs struct { + mounter *mount.SafeFormatAndMount } -// NewConflictError returns a ConflictError with detailed conflict information in element -func NewConflictError(e PrimitiveElement) *ConflictError { - return &ConflictError{ - element: e, - } +// NewResizeFs returns new instance of resizer +func NewResizeFs(mounter *mount.SafeFormatAndMount) *ResizeFs { + return &ResizeFs{mounter: mounter} } -// Error implements error -func (c *ConflictError) Error() string { - return fmt.Sprintf("conflict detected, recorded value (%+v) and remote value (%+v)", - c.element.GetRecorded(), c.element.GetRemote()) +// Resize perform resize of file system +func (resizefs *ResizeFs) Resize(devicePath string, deviceMountPath string) (bool, error) { + return false, fmt.Errorf("Resize is not supported for this build") } diff --git a/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD.bazel deleted file mode 100644 index 3e10dec0328ce..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["slice.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/util/slice", - importpath = "k8s.io/kubernetes/pkg/util/slice", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go b/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go deleted file mode 100644 index b9809cc2972cb..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package slice provides utility methods for common operations on slices. -package slice - -import ( - "sort" - - utilrand "k8s.io/apimachinery/pkg/util/rand" -) - -// CopyStrings copies the contents of the specified string slice -// into a new slice. -func CopyStrings(s []string) []string { - if s == nil { - return nil - } - c := make([]string, len(s)) - copy(c, s) - return c -} - -// SortStrings sorts the specified string slice in place. It returns the same -// slice that was provided in order to facilitate method chaining. -func SortStrings(s []string) []string { - sort.Strings(s) - return s -} - -// ShuffleStrings copies strings from the specified slice into a copy in random -// order. It returns a new slice. -func ShuffleStrings(s []string) []string { - if s == nil { - return nil - } - shuffled := make([]string, len(s)) - perm := utilrand.Perm(len(s)) - for i, j := range perm { - shuffled[j] = s[i] - } - return shuffled -} - -// ContainsString checks if a given slice of strings contains the provided string. -// If a modifier func is provided, it is called with the slice item before the comparation. 
-func ContainsString(slice []string, s string, modifier func(s string) string) bool { - for _, item := range slice { - if item == s { - return true - } - if modifier != nil && modifier(item) == s { - return true - } - } - return false -} - -// RemoveString returns a newly created []string that contains all items from slice that -// are not equal to s and modifier(s) in case modifier func is provided. -func RemoveString(slice []string, s string, modifier func(s string) string) []string { - newSlice := make([]string, 0) - for _, item := range slice { - if item == s { - continue - } - if modifier != nil && modifier(item) == s { - continue - } - newSlice = append(newSlice, item) - } - if len(newSlice) == 0 { - // Sanitize for unit tests so we don't need to distinguish empty array - // and nil. - newSlice = nil - } - return newSlice -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/strings/strings.go b/vendor/k8s.io/kubernetes/pkg/util/strings/strings.go index 1015671aa917f..29be3170a2505 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/strings/strings.go +++ b/vendor/k8s.io/kubernetes/pkg/util/strings/strings.go @@ -22,7 +22,7 @@ import ( "unicode" ) -// Splits a fully qualified name and returns its namespace and name. +// SplitQualifiedName splits a fully qualified name and returns its namespace and name. // Assumes that the input 'str' has been validated. func SplitQualifiedName(str string) (string, string) { parts := strings.Split(str, "/") @@ -32,19 +32,18 @@ func SplitQualifiedName(str string) (string, string) { return parts[0], parts[1] } -// Joins 'namespace' and 'name' and returns a fully qualified name +// JoinQualifiedName joins 'namespace' and 'name' and returns a fully qualified name // Assumes that the input is valid. func JoinQualifiedName(namespace, name string) string { return path.Join(namespace, name) } -// Returns the first N slice of a string. +// ShortenString returns the first N slice of a string. func ShortenString(str string, n int) string { if len(str) <= n { return str - } else { - return str[:n] } + return str[:n] } // isVowel returns true if the rune is a vowel (case insensitive). diff --git a/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go b/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go index 76e4bb866814e..0777f6d69223c 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go +++ b/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package taints implements utilites for working with taints +// package taints implements utilities for working with taints package taints import ( @@ -240,7 +240,7 @@ func DeleteTaintsByKey(taints []v1.Taint, taintKey string) ([]v1.Taint, bool) { return newTaints, deleted } -// DeleteTaint removes all the the taints that have the same key and effect to given taintToDelete. +// DeleteTaint removes all the taints that have the same key and effect to given taintToDelete. func DeleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool) { newTaints := []v1.Taint{} deleted := false diff --git a/vendor/k8s.io/kubernetes/pkg/version/doc.go b/vendor/k8s.io/kubernetes/pkg/version/doc.go index e4d68bc9f497c..a4a1c035fc8d1 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/version/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +// +k8s:openapi-gen=true + // Package version supplies version information collected at build time to // kubernetes components. -// +k8s:openapi-gen=true package version // import "k8s.io/kubernetes/pkg/version" diff --git a/vendor/k8s.io/kubernetes/pkg/volume/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/volume/BUILD.bazel index 482e084f1c54f..fe6bf14f7d740 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/volume/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "metrics_errors.go", "metrics_nil.go", "metrics_statfs.go", + "noop_expandable_plugin.go", "plugins.go", "volume.go", "volume_linux.go", @@ -18,7 +19,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -28,8 +28,9 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/cloud-provider:go_default_library", "//vendor/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/cloudprovider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", "//vendor/k8s.io/kubernetes/pkg/volume/util/fs:go_default_library", "//vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go b/vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go new file mode 100644 index 0000000000000..3d3d5e1dfd75d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go @@ -0,0 +1,77 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package volume + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" +) + +type noopExpandableVolumePluginInstance struct { + spec *Spec +} + +var _ ExpandableVolumePlugin = &noopExpandableVolumePluginInstance{} + +func (n *noopExpandableVolumePluginInstance) ExpandVolumeDevice(spec *Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { + return newSize, nil +} + +func (n *noopExpandableVolumePluginInstance) Init(host VolumeHost) error { + return nil +} + +func (n *noopExpandableVolumePluginInstance) GetPluginName() string { + return n.spec.KubeletExpandablePluginName() +} + +func (n *noopExpandableVolumePluginInstance) GetVolumeName(spec *Spec) (string, error) { + return n.spec.Name(), nil +} + +func (n *noopExpandableVolumePluginInstance) CanSupport(spec *Spec) bool { + return true +} + +func (n *noopExpandableVolumePluginInstance) RequiresRemount() bool { + return false +} + +func (n *noopExpandableVolumePluginInstance) NewMounter(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (Mounter, error) { + return nil, nil +} + +func (n *noopExpandableVolumePluginInstance) NewUnmounter(name string, podUID types.UID) (Unmounter, error) { + return nil, nil +} + +func (n *noopExpandableVolumePluginInstance) ConstructVolumeSpec(volumeName, mountPath string) (*Spec, error) { + return n.spec, nil +} + +func (n *noopExpandableVolumePluginInstance) SupportsMountOption() bool { + return true +} + +func (n *noopExpandableVolumePluginInstance) SupportsBulkVolumeVerification() bool { + return false +} + +func (n *noopExpandableVolumePluginInstance) RequiresFSResize() bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index e9ccf7e296772..5190e1c919c12 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -22,7 +22,6 @@ import ( "strings" "sync" - "github.com/golang/glog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -32,8 +31,9 @@ import ( "k8s.io/apimachinery/pkg/util/validation" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" + cloudprovider "k8s.io/cloud-provider" csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" - "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume/util/recyclerclient" ) @@ -135,10 +135,10 @@ type VolumePlugin interface { NewUnmounter(name string, podUID types.UID) (Unmounter, error) // ConstructVolumeSpec constructs a volume spec based on the given volume name - // and mountPath. The spec may have incomplete information due to limited + // and volumePath. The spec may have incomplete information due to limited // information from input. 
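The noop expandable plugin above exists so controller-side expansion can succeed for volume types that are resized only by the kubelet; the Spec helpers added in the plugins.go hunks below restrict that to Flex volumes. A hedged sketch of that decision, written against the core v1 types rather than the unexported plumbing:

package volumesketch

import (
	"k8s.io/api/core/v1"
)

// isKubeletExpandable mirrors the Spec.IsKubeletExpandable helper introduced
// below: only Flex volumes are expanded exclusively on the node, so only they
// need the noop controller-side plugin.
func isKubeletExpandable(vol *v1.Volume, pv *v1.PersistentVolume) bool {
	switch {
	case vol != nil:
		return vol.FlexVolume != nil
	case pv != nil:
		return pv.Spec.FlexVolume != nil
	default:
		return false
	}
}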
This function is used by volume manager to reconstruct // volume spec by reading the volume directories from disk - ConstructVolumeSpec(volumeName, mountPath string) (*Spec, error) + ConstructVolumeSpec(volumeName, volumePath string) (*Spec, error) // SupportsMountOption returns true if volume plugins supports Mount options // Specifying mount options in a volume plugin that doesn't support @@ -225,6 +225,13 @@ type ExpandableVolumePlugin interface { RequiresFSResize() bool } +// FSResizableVolumePlugin is an extension of ExpandableVolumePlugin and is used for volumes (flex) +// that require extra steps on nodes for expansion to complete +type FSResizableVolumePlugin interface { + ExpandableVolumePlugin + ExpandFS(spec *Spec, devicePath, deviceMountPath string, newSize, oldSize resource.Quantity) error +} + // VolumePluginWithAttachLimits is an extended interface of VolumePlugin that restricts number of // volumes that can be attached to a node. type VolumePluginWithAttachLimits interface { @@ -268,7 +275,7 @@ type BlockVolumePlugin interface { // The spec may have incomplete information due to limited information // from input. This function is used by volume manager to reconstruct // volume spec by reading the volume directories from disk. - ConstructBlockVolumeSpec(podUID types.UID, volumeName, mountPath string) (*Spec, error) + ConstructBlockVolumeSpec(podUID types.UID, volumeName, volumePath string) (*Spec, error) } // VolumeHost is an interface that plugins can use to access the kubelet. @@ -347,6 +354,8 @@ type VolumeHost interface { GetServiceAccountTokenFunc() func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) + DeleteServiceAccountTokenFunc() func(podUID types.UID) + // Returns an interface that should be used to execute any utilities in volume plugins GetExec(pluginName string) mount.Exec @@ -388,6 +397,36 @@ func (spec *Spec) Name() string { } } +// IsKubeletExpandable returns true for volume types that can be expanded only by the node +// and not the controller. Currently Flex volume is the only one in this category since +// it is typically not installed on the controller +func (spec *Spec) IsKubeletExpandable() bool { + switch { + case spec.Volume != nil: + return spec.Volume.FlexVolume != nil + case spec.PersistentVolume != nil: + return spec.PersistentVolume.Spec.FlexVolume != nil + default: + return false + + } +} + +// KubeletExpandablePluginName creates and returns a name for the plugin +// this is used in context on the controller where the plugin lookup fails +// as volume expansion on controller isn't supported, but a plugin name is +// required +func (spec *Spec) KubeletExpandablePluginName() string { + switch { + case spec.Volume != nil && spec.Volume.FlexVolume != nil: + return spec.Volume.FlexVolume.Driver + case spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil: + return spec.PersistentVolume.Spec.FlexVolume.Driver + default: + return "" + } +} + // VolumeConfig is how volume plugins receive configuration. An instance // specific to the plugin will be passed to the plugin's // ProbeVolumePlugins(config) func. Reasonable defaults will be provided by @@ -475,7 +514,7 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu } if err := pm.prober.Init(); err != nil { // Prober init failure should not affect the initialization of other plugins. 
- glog.Errorf("Error initializing dynamic plugin prober: %s", err) + klog.Errorf("Error initializing dynamic plugin prober: %s", err) pm.prober = &dummyPluginProber{} } @@ -500,12 +539,12 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu } err := plugin.Init(host) if err != nil { - glog.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error()) + klog.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error()) allErrs = append(allErrs, err) continue } pm.plugins[name] = plugin - glog.V(1).Infof("Loaded volume plugin %q", name) + klog.V(1).Infof("Loaded volume plugin %q", name) } return utilerrors.NewAggregate(allErrs) } @@ -521,7 +560,7 @@ func (pm *VolumePluginMgr) initProbedPlugin(probedPlugin VolumePlugin) error { return fmt.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error()) } - glog.V(1).Infof("Loaded volume plugin %q", name) + klog.V(1).Infof("Loaded volume plugin %q", name) return nil } @@ -600,14 +639,14 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) { func (pm *VolumePluginMgr) refreshProbedPlugins() { events, err := pm.prober.Probe() if err != nil { - glog.Errorf("Error dynamically probing plugins: %s", err) + klog.Errorf("Error dynamically probing plugins: %s", err) return // Use cached plugins upon failure. } for _, event := range events { if event.Op == ProbeAddOrUpdate { if err := pm.initProbedPlugin(event.Plugin); err != nil { - glog.Errorf("Error initializing dynamically probed plugin %s; error: %s", + klog.Errorf("Error initializing dynamically probed plugin %s; error: %s", event.Plugin.GetPluginName(), err) continue } @@ -616,7 +655,7 @@ func (pm *VolumePluginMgr) refreshProbedPlugins() { // Plugin is not available on ProbeRemove event, only PluginName delete(pm.probedPlugins, event.PluginName) } else { - glog.Errorf("Unknown Operation on PluginName: %s.", + klog.Errorf("Unknown Operation on PluginName: %s.", event.Plugin.GetPluginName()) } } @@ -797,6 +836,13 @@ func (pm *VolumePluginMgr) FindDeviceMountablePluginByName(name string) (DeviceM func (pm *VolumePluginMgr) FindExpandablePluginBySpec(spec *Spec) (ExpandableVolumePlugin, error) { volumePlugin, err := pm.FindPluginBySpec(spec) if err != nil { + if spec.IsKubeletExpandable() { + // for kubelet expandable volumes, return a noop plugin that + // returns success for expand on the controller + klog.Warningf("FindExpandablePluginBySpec(%s) -> returning noopExpandableVolumePluginInstance", spec.Name()) + return &noopExpandableVolumePluginInstance{spec}, nil + } + klog.Warningf("FindExpandablePluginBySpec(%s) -> err:%v", spec.Name(), err) return nil, err } @@ -845,6 +891,32 @@ func (pm *VolumePluginMgr) FindMapperPluginByName(name string) (BlockVolumePlugi return nil, nil } +// FindFSResizablePluginBySpec fetches a persistent volume plugin by spec +func (pm *VolumePluginMgr) FindFSResizablePluginBySpec(spec *Spec) (FSResizableVolumePlugin, error) { + volumePlugin, err := pm.FindPluginBySpec(spec) + if err != nil { + return nil, err + } + if fsResizablePlugin, ok := volumePlugin.(FSResizableVolumePlugin); ok { + return fsResizablePlugin, nil + } + return nil, nil +} + +// FindFSResizablePluginByName fetches a persistent volume plugin by name +func (pm *VolumePluginMgr) FindFSResizablePluginByName(name string) (FSResizableVolumePlugin, error) { + volumePlugin, err := pm.FindPluginByName(name) + if err != nil { + return nil, err + } + + if fsResizablePlugin, ok := volumePlugin.(FSResizableVolumePlugin); ok { + 
return fsResizablePlugin, nil + } + + return nil, nil +} + // NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler // pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests // for emptiness. Most attributes of the template will be correct for most diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD.bazel index dd07da31b4a89..f0963d2991550 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD.bazel @@ -21,7 +21,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/util", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", @@ -34,11 +33,13 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:go_default_library", "//vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper:go_default_library", "//vendor/k8s.io/kubernetes/pkg/features:go_default_library", "//vendor/k8s.io/kubernetes/pkg/kubelet/apis:go_default_library", "//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", + "//vendor/k8s.io/kubernetes/pkg/util/resizefs:go_default_library", "//vendor/k8s.io/kubernetes/pkg/util/strings:go_default_library", "//vendor/k8s.io/kubernetes/pkg/volume:go_default_library", "//vendor/k8s.io/kubernetes/pkg/volume/util/types:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go b/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go index 0613bef6d9f50..7b3d034012f4d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go @@ -27,7 +27,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" ) @@ -61,6 +61,7 @@ type AtomicWriter struct { logContext string } +// FileProjection contains file Data and access Mode type FileProjection struct { Data []byte Mode int32 @@ -120,7 +121,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { // (1) cleanPayload, err := validatePayload(payload) if err != nil { - glog.Errorf("%s: invalid payload: %v", w.logContext, err) + klog.Errorf("%s: invalid payload: %v", w.logContext, err) return err } @@ -129,7 +130,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { oldTsDir, err := os.Readlink(dataDirPath) if err != nil { if !os.IsNotExist(err) { - glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err) + klog.Errorf("%s: error reading link for data directory: %v", w.logContext, err) return err } // although Readlink() returns "" on err, don't be fragile by relying on it (since it's not specified in docs) @@ -144,41 +145,40 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { // (3) pathsToRemove, err = w.pathsToRemove(cleanPayload, oldTsPath) if err != nil { - glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err) + klog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err) 
return err } // (4) if should, err := shouldWritePayload(cleanPayload, oldTsPath); err != nil { - glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err) + klog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err) return err } else if !should && len(pathsToRemove) == 0 { - glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir) + klog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir) return nil } else { - glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir) + klog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir) } } // (5) tsDir, err := w.newTimestampDir() if err != nil { - glog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err) + klog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err) return err } tsDirName := filepath.Base(tsDir) // (6) if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil { - glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err) + klog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err) return err - } else { - glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir) } + klog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir) // (7) if err = w.createUserVisibleFiles(cleanPayload); err != nil { - glog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err) + klog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err) return err } @@ -186,7 +186,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { newDataDirPath := path.Join(w.targetDir, newDataDirName) if err = os.Symlink(tsDirName, newDataDirPath); err != nil { os.RemoveAll(tsDir) - glog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err) + klog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err) return err } @@ -201,20 +201,20 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { if err != nil { os.Remove(newDataDirPath) os.RemoveAll(tsDir) - glog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err) + klog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err) return err } // (10) if err = w.removeUserVisiblePaths(pathsToRemove); err != nil { - glog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err) + klog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err) return err } // (11) if len(oldTsDir) > 0 { if err = os.RemoveAll(oldTsPath); err != nil { - glog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err) + klog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err) return err } } @@ -222,7 +222,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { return nil } -// validatePayload returns an error if any path in the payload returns a copy of the payload with the paths cleaned. +// validatePayload returns an error if any path in the payload returns a copy of the payload with the paths cleaned. 
func validatePayload(payload map[string]FileProjection) (map[string]FileProjection, error) { cleanPayload := make(map[string]FileProjection) for k, content := range payload { @@ -329,7 +329,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir } else if err != nil { return nil, err } - glog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List()) + klog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List()) newPaths := sets.NewString() for file := range payload { @@ -341,10 +341,10 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir subPath = strings.TrimSuffix(subPath, string(os.PathSeparator)) } } - glog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List()) + klog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List()) result := paths.Difference(newPaths) - glog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result) + klog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result) return result, nil } @@ -353,7 +353,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir func (w *AtomicWriter) newTimestampDir() (string, error) { tsDir, err := ioutil.TempDir(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05.")) if err != nil { - glog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err) + klog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err) return "", err } @@ -362,7 +362,7 @@ func (w *AtomicWriter) newTimestampDir() (string, error) { // regardless of the process' umask. err = os.Chmod(tsDir, 0755) if err != nil { - glog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err) + klog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err) return "", err } @@ -380,13 +380,13 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir err := os.MkdirAll(baseDir, os.ModePerm) if err != nil { - glog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err) + klog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err) return err } err = ioutil.WriteFile(fullPath, content, mode) if err != nil { - glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) + klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) return err } // Chmod is needed because ioutil.WriteFile() ends up calling @@ -395,7 +395,7 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir // in the file no matter what the umask is. 
err = os.Chmod(fullPath, mode) if err != nil { - glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) + klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) } } @@ -445,7 +445,7 @@ func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error { continue } if err := os.Remove(path.Join(w.targetDir, p)); err != nil { - glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err) + klog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err) lasterr = err } } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go b/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go index 079a015006457..8325dbf755b8c 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go @@ -33,7 +33,7 @@ const ( // Amazon recommends no more than 40; the system root volume uses at least one. // See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits DefaultMaxEBSVolumes = 39 - // DefaultMaxEBSM5VolumeLimit is default EBS volume limit on m5 and c5 instances + // DefaultMaxEBSNitroVolumeLimit is default EBS volume limit on m5 and c5 instances DefaultMaxEBSNitroVolumeLimit = 25 // AzureVolumeLimitKey stores resource name that will store volume limits for Azure AzureVolumeLimitKey = "attachable-volumes-azure-disk" diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go index f508399b1a0d5..1d0791ee05c12 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go @@ -25,10 +25,10 @@ type DeviceUtil interface { } type deviceHandler struct { - get_io IoUtil + getIo IoUtil } //NewDeviceHandler Create a new IoHandler implementation func NewDeviceHandler(io IoUtil) DeviceUtil { - return &deviceHandler{get_io: io} + return &deviceHandler{getIo: io} } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go index 2d1f43c83b000..66e8564915b07 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go @@ -21,7 +21,7 @@ package util import ( "errors" "fmt" - "github.com/golang/glog" + "k8s.io/klog" "path" "strconv" "strings" @@ -29,7 +29,7 @@ import ( // FindMultipathDeviceForDevice given a device name like /dev/sdx, find the devicemapper parent func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string { - io := handler.get_io + io := handler.getIo disk, err := findDeviceForPath(device, io) if err != nil { return "" @@ -68,7 +68,7 @@ func findDeviceForPath(path string, io IoUtil) (string, error) { // which are managed by the devicemapper dm-1. 
func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string { var devices []string - io := handler.get_io + io := handler.getIo // Split path /dev/dm-1 into "", "dev", "dm-1" parts := strings.Split(dm, "/") if len(parts) != 3 || !strings.HasPrefix(parts[1], "dev") { @@ -92,7 +92,7 @@ func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string { // } func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error) { portalHostMap := make(map[string]int) - io := handler.get_io + io := handler.getIo // Iterate over all the iSCSI hosts in sysfs sysPath := "/sys/class/iscsi_host" @@ -109,7 +109,7 @@ func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) ( } hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host")) if err != nil { - glog.Errorf("Could not get number from iSCSI host: %s", hostName) + klog.Errorf("Could not get number from iSCSI host: %s", hostName) continue } @@ -205,7 +205,7 @@ func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) ( // corresponding to that LUN. func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error) { devices := make([]string, 0) - io := handler.get_io + io := handler.getIo // Iterate over all the iSCSI hosts in sysfs sysPath := "/sys/class/iscsi_host" @@ -222,7 +222,7 @@ func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) } hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host")) if err != nil { - glog.Errorf("Could not get number from iSCSI host: %s", hostName) + klog.Errorf("Could not get number from iSCSI host: %s", hostName) continue } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go b/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go index 620add69dff1d..b32dae216e6d5 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Contains utility code for use by volume plugins. +// Package util contains utility code for use by volume plugins. package util // import "k8s.io/kubernetes/pkg/volume/util" diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/error.go b/vendor/k8s.io/kubernetes/pkg/volume/util/error.go index 2c9655866b04f..a52a1b65ce27d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/error.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/error.go @@ -20,7 +20,7 @@ import ( k8stypes "k8s.io/apimachinery/pkg/types" ) -// This error on attach indicates volume is attached to a different node +// DanglingAttachError indicates volume is attached to a different node // than we expected. type DanglingAttachError struct { msg string @@ -32,6 +32,7 @@ func (err *DanglingAttachError) Error() string { return err.msg } +// NewDanglingError create a new dangling error func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error { return &DanglingAttachError{ msg: msg, diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go b/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go index 92d3c2bdd57b0..e1fdf5673c43b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go @@ -17,9 +17,9 @@ limitations under the License. package util const ( - // Name of finalizer on PVCs that have a running pod. 
+ // PVCProtectionFinalizer is the name of finalizer on PVCs that have a running pod. PVCProtectionFinalizer = "kubernetes.io/pvc-protection" - // Name of finalizer on PVs that are bound by PVCs + // PVProtectionFinalizer is the name of finalizer on PVs that are bound by PVCs PVProtectionFinalizer = "kubernetes.io/pv-protection" ) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go b/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go index e3af12df89066..338e812663b55 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go @@ -17,9 +17,11 @@ limitations under the License. package util import ( + "fmt" "time" "github.com/prometheus/client_golang/prometheus" + "k8s.io/kubernetes/pkg/volume" ) var storageOperationMetric = prometheus.NewHistogramVec( @@ -62,3 +64,15 @@ func OperationCompleteHook(plugin, operationName string) func(*error) { } return opComplete } + +// GetFullQualifiedPluginNameForVolume returns full qualified plugin name for +// given volume. For CSI plugin, it appends plugin driver name at the end of +// plugin name, e.g. kubernetes.io/csi:csi-hostpath. It helps to distinguish +// between metrics emitted for CSI volumes which may be handled by different +// CSI plugin drivers. +func GetFullQualifiedPluginNameForVolume(pluginName string, spec *volume.Spec) string { + if spec != nil && spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil { + return fmt.Sprintf("%s:%s", pluginName, spec.PersistentVolume.Spec.CSI.Driver) + } + return pluginName +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/BUILD.bazel index b76f672a3b21d..d8fe0167d5a5e 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/BUILD.bazel @@ -7,12 +7,12 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/util/recyclerclient", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go b/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go index 275f55bbe3364..c7a8f147f8782 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go @@ -20,13 +20,13 @@ import ( "fmt" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" ) type RecycleEventRecorder func(eventtype, message string) @@ -51,7 +51,7 @@ func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeC // same as above func comments, except 'recyclerClient' is a narrower pod API // interface to ease testing func internalRecycleVolumeByWatchingPodUntilCompletion(pvName 
string, pod *v1.Pod, recyclerClient recyclerClient) error { - glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name) + klog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name) // Generate unique name for the recycler pod - we need to get "already // exists" error when a previous controller has already started recycling @@ -63,7 +63,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po defer close(stopChannel) podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel) if err != nil { - glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err) + klog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err) return err } @@ -84,10 +84,10 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po err = waitForPod(pod, recyclerClient, podCh) // In all cases delete the recycler pod and log its result. - glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) + klog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace) if deleteErr != nil { - glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) + klog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Returning recycler error is preferred, the pod will be deleted again on @@ -117,7 +117,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E case *v1.Pod: // POD changed pod := event.Object.(*v1.Pod) - glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase) + klog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase) switch event.Type { case watch.Added, watch.Modified: if pod.Status.Phase == v1.PodSucceeded { @@ -142,7 +142,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E case *v1.Event: // Event received podEvent := event.Object.(*v1.Event) - glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message) + klog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message) if event.Type == watch.Added { recyclerClient.Event(podEvent.Type, podEvent.Message) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go index c1d2a1c82acdc..9090ff868623a 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go @@ -24,10 +24,13 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/util/resizefs" + "k8s.io/kubernetes/pkg/volume" ) var ( - knownResizeConditions map[v1.PersistentVolumeClaimConditionType]bool = map[v1.PersistentVolumeClaimConditionType]bool{ + knownResizeConditions = map[v1.PersistentVolumeClaimConditionType]bool{ v1.PersistentVolumeClaimFileSystemResizePending: true, v1.PersistentVolumeClaimResizing: true, } @@ -123,3 +126,14 @@ func MergeResizeConditionOnPVC( pvc.Status.Conditions = newConditions return pvc } + +// 
GenericResizeFS : call generic filesystem resizer for plugins that don't have any special filesystem resize requirements +func GenericResizeFS(host volume.VolumeHost, pluginName, devicePath, deviceMountPath string) (bool, error) { + mounter := host.GetMounter(pluginName) + diskFormatter := &mount.SafeFormatAndMount{ + Interface: mounter, + Exec: host.GetExec(pluginName), + } + resizer := resizefs.NewResizeFs(diskFormatter) + return resizer.Resize(devicePath, deviceMountPath) +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index 72d9a781e782a..18e24d69f0513 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -23,10 +23,8 @@ import ( "path" "path/filepath" "strings" - "syscall" - "github.com/golang/glog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -34,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" "k8s.io/kubernetes/pkg/api/legacyscheme" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" @@ -101,7 +100,7 @@ func IsReady(dir string) bool { } if !s.Mode().IsRegular() { - glog.Errorf("ready-file is not a file: %s", readyFile) + klog.Errorf("ready-file is not a file: %s", readyFile) return false } @@ -113,14 +112,14 @@ func IsReady(dir string) bool { // created. func SetReady(dir string) { if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) { - glog.Errorf("Can't mkdir %s: %v", dir, err) + klog.Errorf("Can't mkdir %s: %v", dir, err) return } readyFile := path.Join(dir, readyFileName) file, err := os.Create(readyFile) if err != nil { - glog.Errorf("Can't touch %s: %v", readyFile, err) + klog.Errorf("Can't touch %s: %v", readyFile, err) return } file.Close() @@ -128,8 +127,9 @@ func SetReady(dir string) { // UnmountPath is a common unmount routine that unmounts the given path and // deletes the remaining directory if successful. +// TODO: Remove this function and change callers to call mount pkg directly func UnmountPath(mountPath string, mounter mount.Interface) error { - return UnmountMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */) + return mount.CleanupMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */) } // UnmountMountPoint is a common unmount routine that unmounts the given path and @@ -137,93 +137,21 @@ func UnmountPath(mountPath string, mounter mount.Interface) error { // if extensiveMountPointCheck is true // IsNotMountPoint will be called instead of IsLikelyNotMountPoint. // IsNotMountPoint is more expensive but properly handles bind mounts. +// TODO: Change callers to call mount pkg directly func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error { - pathExists, pathErr := PathExists(mountPath) - if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath) - return nil - } - corruptedMnt := IsCorruptedMnt(pathErr) - if pathErr != nil && !corruptedMnt { - return fmt.Errorf("Error checking path: %v", pathErr) - } - return doUnmountMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt) -} - -// doUnmountMountPoint is a common unmount routine that unmounts the given path and -// deletes the remaining directory if successful. 
-// if extensiveMountPointCheck is true -// IsNotMountPoint will be called instead of IsLikelyNotMountPoint. -// IsNotMountPoint is more expensive but properly handles bind mounts. -// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, Take it as an argument for convenience of testing -func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool, corruptedMnt bool) error { - if !corruptedMnt { - var notMnt bool - var err error - if extensiveMountPointCheck { - notMnt, err = mount.IsNotMountPoint(mounter, mountPath) - } else { - notMnt, err = mounter.IsLikelyNotMountPoint(mountPath) - } - - if err != nil { - return err - } - - if notMnt { - glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) - return os.Remove(mountPath) - } - } - - // Unmount the mount path - glog.V(4).Infof("%q is a mountpoint, unmounting", mountPath) - if err := mounter.Unmount(mountPath); err != nil { - return err - } - notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath) - if mntErr != nil { - return mntErr - } - if notMnt { - glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) - return os.Remove(mountPath) - } - return fmt.Errorf("Failed to unmount path %v", mountPath) + return mount.CleanupMountPoint(mountPath, mounter, extensiveMountPointCheck) } // PathExists returns true if the specified path exists. +// TODO: Change callers to call mount pkg directly func PathExists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } else if os.IsNotExist(err) { - return false, nil - } else if IsCorruptedMnt(err) { - return true, err - } else { - return false, err - } + return mount.PathExists(path) } // IsCorruptedMnt return true if err is about corrupted mount point +// TODO: Change callers to call mount pkg directly func IsCorruptedMnt(err error) bool { - if err == nil { - return false - } - var underlyingError error - switch pe := err.(type) { - case nil: - return false - case *os.PathError: - underlyingError = pe.Err - case *os.LinkError: - underlyingError = pe.Err - case *os.SyscallError: - underlyingError = pe.Err - } - - return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO + return mount.IsCorruptedMnt(err) } // GetSecretForPod locates secret by name in the pod's namespace and returns secret map @@ -261,6 +189,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl return secret, nil } +// GetClassForVolume locates storage class by persistent volume func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) { if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") @@ -290,7 +219,7 @@ func checkVolumeNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]stri if pv.Spec.NodeAffinity.Required != nil { terms := pv.Spec.NodeAffinity.Required.NodeSelectorTerms - glog.V(10).Infof("Match for Required node selector terms %+v", terms) + klog.V(10).Infof("Match for Required node selector terms %+v", terms) if !v1helper.MatchNodeSelectorTerms(terms, labels.Set(nodeLabels), nil) { return fmt.Errorf("No matching NodeSelectorTerms") } @@ -379,11 +308,11 @@ func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zone } // scheduler will guarantee if node != null above, zoneFromNode is member of allowedZones. // so if zoneFromNode != "", we can safely assume it is part of allowedZones. 
- if zones, err := chooseZonesForVolumeIncludingZone(allowedZones, pvcName, zoneFromNode, numReplicas); err != nil { + zones, err := chooseZonesForVolumeIncludingZone(allowedZones, pvcName, zoneFromNode, numReplicas) + if err != nil { return nil, fmt.Errorf("cannot process zones in allowedTopologies: %v", err) - } else { - return zones, nil } + return zones, nil } // pick zone from parameters if present @@ -405,11 +334,11 @@ func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zone // pick zone from zones with nodes if zonesWithNodes.Len() > 0 { // If node != null (and thus zoneFromNode != ""), zoneFromNode will be member of zonesWithNodes - if zones, err := chooseZonesForVolumeIncludingZone(zonesWithNodes, pvcName, zoneFromNode, numReplicas); err != nil { + zones, err := chooseZonesForVolumeIncludingZone(zonesWithNodes, pvcName, zoneFromNode, numReplicas) + if err != nil { return nil, fmt.Errorf("cannot process zones where nodes exist in the cluster: %v", err) - } else { - return zones, nil } + return zones, nil } return nil, fmt.Errorf("cannot determine zones to provision volume in") } @@ -431,6 +360,7 @@ func ZonesFromAllowedTopologies(allowedTopologies []v1.TopologySelectorTerm) (se return zones, nil } +// ZonesSetToLabelValue converts zones set to label value func ZonesSetToLabelValue(strSet sets.String) string { return strings.Join(strSet.UnsortedList(), kubeletapis.LabelMultiZoneDelimiter) } @@ -511,7 +441,7 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { roundedUp := volumeSizeBytes / allocationUnitBytes if volumeSizeBytes%allocationUnitBytes > 0 { - roundedUp += 1 + roundedUp++ } return roundedUp } @@ -605,7 +535,7 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string { zoneSlice := zones.List() zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))] - glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice) + klog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice) return zone } @@ -664,7 +594,7 @@ func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) se replicaZones.Insert(zone) } - glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q", + klog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q", pvcName, replicaZones.UnsortedList(), zoneSlice) return replicaZones } @@ -672,7 +602,7 @@ func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) se func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { if pvcName == "" { // We should always be called with a name; this shouldn't happen - glog.Warningf("No name defined during volume create; choosing random zone") + klog.Warningf("No name defined during volume create; choosing random zone") hash = rand.Uint32() } else { @@ -708,7 +638,7 @@ func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { hashString = hashString[lastDash+1:] } - glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) + klog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) } } @@ -724,7 +654,7 @@ func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { // UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi // to empty_dir func 
UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error { - glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir) + klog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir) // Wrap EmptyDir, let it do the teardown. wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID) @@ -766,7 +696,7 @@ func JoinMountOptions(userOptions []string, systemOptions []string) []string { for _, mountOption := range systemOptions { allMountOptions.Insert(mountOption) } - return allMountOptions.UnsortedList() + return allMountOptions.List() } // ValidateZone returns: @@ -823,9 +753,10 @@ func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName { return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName)) } -// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name -// for a non-attachable volume. -func GetUniqueVolumeNameForNonAttachableVolume( +// GetUniqueVolumeNameFromSpecWithPod returns a unique volume name with pod +// name included. This is useful to generate different names for different pods +// on same volume. +func GetUniqueVolumeNameFromSpecWithPod( podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName { return v1.UniqueVolumeName( fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name())) @@ -945,6 +876,38 @@ func CheckPersistentVolumeClaimModeBlock(pvc *v1.PersistentVolumeClaim) bool { return utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock } +// IsWindowsUNCPath checks if path is prefixed with \\ +// This can be used to skip any processing of paths +// that point to SMB shares, local named pipes and local UNC path +func IsWindowsUNCPath(goos, path string) bool { + if goos != "windows" { + return false + } + // Check for UNC prefix \\ + if strings.HasPrefix(path, `\\`) { + return true + } + return false +} + +// IsWindowsLocalPath checks if path is a local path +// prefixed with "/" or "\" like "/foo/bar" or "\foo\bar" +func IsWindowsLocalPath(goos, path string) bool { + if goos != "windows" { + return false + } + if IsWindowsUNCPath(goos, path) { + return false + } + if strings.Contains(path, ":") { + return false + } + if !(strings.HasPrefix(path, `/`) || strings.HasPrefix(path, `\`)) { + return false + } + return true +} + // MakeAbsolutePath convert path to absolute path according to GOOS func MakeAbsolutePath(goos, path string) string { if goos != "windows" { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD.bazel index 8f4ff83b5214b..5727e4e1e9680 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD.bazel +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD.bazel @@ -11,7 +11,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/util/volumepathhandler", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go b/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go index 61680c11577ee..a7822efc3e556 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go @@ -23,7 +23,7 @@ import ( "path" "path/filepath" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" ) @@ -86,14 +86,14 @@ func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName if !filepath.IsAbs(mapPath) { return fmt.Errorf("The map path should be absolute: map path: %s", mapPath) } - glog.V(5).Infof("MapDevice: devicePath %s", devicePath) - glog.V(5).Infof("MapDevice: mapPath %s", mapPath) - glog.V(5).Infof("MapDevice: linkName %s", linkName) + klog.V(5).Infof("MapDevice: devicePath %s", devicePath) + klog.V(5).Infof("MapDevice: mapPath %s", mapPath) + klog.V(5).Infof("MapDevice: linkName %s", linkName) // Check and create mapPath _, err := os.Stat(mapPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate map path: %s", mapPath) + klog.Errorf("cannot validate map path: %s", mapPath) return err } if err = os.MkdirAll(mapPath, 0750); err != nil { @@ -115,15 +115,15 @@ func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error { if len(mapPath) == 0 { return fmt.Errorf("Failed to unmap device from map path. mapPath is empty") } - glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) - glog.V(5).Infof("UnmapDevice: linkName %s", linkName) + klog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) + klog.V(5).Infof("UnmapDevice: linkName %s", linkName) // Check symbolic link exists linkPath := path.Join(mapPath, string(linkName)) if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil { return checkErr } else if !islinkExist { - glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) + klog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) return nil } err := os.Remove(linkPath) @@ -135,7 +135,7 @@ func (v VolumePathHandler) RemoveMapPath(mapPath string) error { if len(mapPath) == 0 { return fmt.Errorf("Failed to remove map path. 
mapPath is empty") } - glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) + klog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) err := os.RemoveAll(mapPath) if err != nil && !os.IsNotExist(err) { return err @@ -180,12 +180,12 @@ func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) if err != nil { return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err) } - glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) + klog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) if filepath == devPath { refs = append(refs, path.Join(mapPath, filename)) } } - glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) + klog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) return refs, nil } @@ -201,7 +201,7 @@ func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath strin return err } if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) { - glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) + klog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) if res, err := compareSymlinks(path, mapPath); err == nil && res { globalMapPathUUID = path } @@ -211,7 +211,7 @@ func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath strin if err != nil { return "", err } - glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) + klog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) // Return path contains global map path + {pod uuid} return globalMapPathUUID, nil } @@ -225,7 +225,7 @@ func compareSymlinks(global, pod string) (bool, error) { if err != nil { return false, err } - glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) + klog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) if devGlobal == devPod { return true, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go index f9a886d7dc64c..7170edc7de007 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go @@ -25,7 +25,7 @@ import ( "os/exec" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) // AttachFileDevice takes a path to a regular file and makes it available as an @@ -38,7 +38,7 @@ func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { // If no existing loop device for the path, create one if blockDevicePath == "" { - glog.V(4).Infof("Creating device for path: %s", path) + klog.V(4).Infof("Creating device for path: %s", path) blockDevicePath, err = makeLoopDevice(path) if err != nil { return "", err @@ -61,7 +61,7 @@ func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { cmd := exec.Command(losetupPath, args...) out, err := cmd.CombinedOutput() if err != nil { - glog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out) + klog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out) return "", err } return parseLosetupOutputForDevice(out) @@ -72,7 +72,7 @@ func makeLoopDevice(path string) (string, error) { cmd := exec.Command(losetupPath, args...) 
out, err := cmd.CombinedOutput() if err != nil { - glog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out) + klog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out) return "", err } return parseLosetupOutputForDevice(out) @@ -87,7 +87,7 @@ func (v VolumePathHandler) RemoveLoopDevice(device string) error { if _, err := os.Stat(device); os.IsNotExist(err) { return nil } - glog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out) + klog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out) return err } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go index ef1f45208c95f..eb44d5f162fad 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go @@ -24,7 +24,7 @@ import ( "os" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -63,13 +63,13 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error { } if stat == nil { - glog.Errorf("Got nil stat_t for path %v while setting ownership of volume", path) + klog.Errorf("Got nil stat_t for path %v while setting ownership of volume", path) return nil } err = os.Chown(path, int(stat.Uid), int(*fsGroup)) if err != nil { - glog.Errorf("Chown failed on %v: %v", path, err) + klog.Errorf("Chown failed on %v: %v", path, err) } mask := rwMask @@ -83,7 +83,7 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error { err = os.Chmod(path, info.Mode()|mask) if err != nil { - glog.Errorf("Chmod failed on %v: %v", path, err) + klog.Errorf("Chmod failed on %v: %v", path, err) } return nil diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/cloud-provider/LICENSE b/vendor/k8s.io/kubernetes/staging/src/k8s.io/cloud-provider/LICENSE new file mode 100644 index 0000000000000..8dada3edaf50d --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/cloud-provider/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap/LICENSE b/vendor/k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/kubernetes/third_party/forked/godep/License b/vendor/k8s.io/kubernetes/third_party/forked/godep/License new file mode 100644 index 0000000000000..5c52e6867c86c --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/forked/godep/License @@ -0,0 +1,28 @@ +Copyright © 2013 Keith Rarick. +Portions Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/kubernetes/third_party/go-srcimporter/LICENSE b/vendor/k8s.io/kubernetes/third_party/go-srcimporter/LICENSE new file mode 100644 index 0000000000000..6a66aea5eafe0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/go-srcimporter/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/kubernetes/third_party/multiarch/qemu-user-static/LICENSE b/vendor/k8s.io/kubernetes/third_party/multiarch/qemu-user-static/LICENSE new file mode 100644 index 0000000000000..e9658e820b8f1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/multiarch/qemu-user-static/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015-2016 Manfred Touron + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go index 9437a13f8af62..0f970b0cebcc7 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=metrics.k8s.io + package metrics diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go index 6bd8967b40220..3c6b4b4182cfd 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
@@ -342,24 +341,6 @@ func (m *PodMetricsList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -637,51 +618,14 @@ func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Usage == nil { m.Usage = make(k8s_io_api_core_v1.ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -691,46 +635,85 @@ func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) 
& 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = mapvalue } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -898,51 +881,14 @@ func (m *NodeMetrics) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Usage == nil { m.Usage = make(k8s_io_api_core_v1.ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -952,46 +898,85 @@ func (m *NodeMetrics) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return 
ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = mapvalue } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go index 4d287a29ec81d..d2166bfe557ce 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -342,24 +341,6 @@ func (m *PodMetricsList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -637,51 +618,14 @@ func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Usage == nil { m.Usage = make(k8s_io_api_core_v1.ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -691,46 +635,85 @@ func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) 
& 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = mapvalue } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -898,51 +881,14 @@ func (m *NodeMetrics) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Usage == nil { m.Usage = make(k8s_io_api_core_v1.ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -952,46 +898,85 @@ func (m *NodeMetrics) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return 
ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = mapvalue } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go index 0f88e58605ad7..7b594d73d3391 100644 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" @@ -66,10 +68,15 @@ func (c *nodeMetricses) Get(name string, options v1.GetOptions) (result *v1alpha // List takes label and field selectors, and returns the list of NodeMetricses that match those selectors. func (c *nodeMetricses) List(opts v1.ListOptions) (result *v1alpha1.NodeMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.NodeMetricsList{} err = c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -77,9 +84,14 @@ func (c *nodeMetricses) List(opts v1.ListOptions) (result *v1alpha1.NodeMetricsL // Watch returns a watch.Interface that watches the requested nodeMetricses. 
func (c *nodeMetricses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go index e11182eb74132..e6fb6b5f60f84 100644 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" @@ -69,11 +71,16 @@ func (c *podMetricses) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of PodMetricses that match those selectors. func (c *podMetricses) List(opts v1.ListOptions) (result *v1alpha1.PodMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.PodMetricsList{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -81,10 +88,15 @@ func (c *podMetricses) List(opts v1.ListOptions) (result *v1alpha1.PodMetricsLis // Watch returns a watch.Interface that watches the requested podMetricses. func (c *podMetricses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go index 845914133ccb1..5b1dd89a12ce1 100644 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" @@ -66,10 +68,15 @@ func (c *nodeMetricses) Get(name string, options v1.GetOptions) (result *v1beta1 // List takes label and field selectors, and returns the list of NodeMetricses that match those selectors. func (c *nodeMetricses) List(opts v1.ListOptions) (result *v1beta1.NodeMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.NodeMetricsList{} err = c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -77,9 +84,14 @@ func (c *nodeMetricses) List(opts v1.ListOptions) (result *v1beta1.NodeMetricsLi // Watch returns a watch.Interface that watches the requested nodeMetricses. 
func (c *nodeMetricses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go index 02e4e0a0d7dc2..cdc2510c7c6c2 100644 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" @@ -69,11 +71,16 @@ func (c *podMetricses) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of PodMetricses that match those selectors. func (c *podMetricses) List(opts v1.ListOptions) (result *v1beta1.PodMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PodMetricsList{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -81,10 +88,15 @@ func (c *podMetricses) List(opts v1.ListOptions) (result *v1beta1.PodMetricsList // Watch returns a watch.Interface that watches the requested podMetricses. func (c *podMetricses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
Watch() } diff --git a/vendor/k8s.io/utils/nsenter/BUILD.bazel b/vendor/k8s.io/utils/nsenter/BUILD.bazel new file mode 100644 index 0000000000000..f0e8cc6635e8c --- /dev/null +++ b/vendor/k8s.io/utils/nsenter/BUILD.bazel @@ -0,0 +1,51 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "exec.go", + "exec_unsupported.go", + "nsenter.go", + "nsenter_unsupported.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/utils/nsenter", + importpath = "k8s.io/utils/nsenter", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/k8s.io/utils/nsenter/OWNERS b/vendor/k8s.io/utils/nsenter/OWNERS new file mode 100644 index 0000000000000..c4f27cb43817e --- /dev/null +++ b/vendor/k8s.io/utils/nsenter/OWNERS @@ -0,0 +1,8 @@ +reviewers: + - jsafrane + - msau42 + - cofyc +approvers: + - jsafrane + - msau42 + - cofyc diff --git a/vendor/k8s.io/utils/nsenter/exec.go b/vendor/k8s.io/utils/nsenter/exec.go new file mode 100644 index 0000000000000..201f1270c7725 --- /dev/null +++ b/vendor/k8s.io/utils/nsenter/exec.go @@ -0,0 +1,67 @@ +// +build linux + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nsenter + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/golang/glog" + "k8s.io/utils/exec" +) + +// Executor wraps executor interface to be executed via nsenter +type Executor struct { + // Exec implementation + executor exec.Interface + // Path to the host's root proc path + hostProcMountNsPath string +} + +// NewNsenterExecutor returns new nsenter based executor +func NewNsenterExecutor(hostRootFsPath string, executor exec.Interface) *Executor { + hostProcMountNsPath := filepath.Join(hostRootFsPath, mountNsPath) + nsExecutor := &Executor{ + hostProcMountNsPath: hostProcMountNsPath, + executor: executor, + } + return nsExecutor +} + +// Command returns a command wrapped with nenter +func (nsExecutor *Executor) Command(cmd string, args ...string) exec.Cmd { + fullArgs := append([]string{fmt.Sprintf("--mount=%s", nsExecutor.hostProcMountNsPath), "--"}, + append([]string{cmd}, args...)...) + glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + return nsExecutor.executor.Command(nsenterPath, fullArgs...) +} + +// CommandContext returns a CommandContext wrapped with nsenter +func (nsExecutor *Executor) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd { + fullArgs := append([]string{fmt.Sprintf("--mount=%s", nsExecutor.hostProcMountNsPath), "--"}, + append([]string{cmd}, args...)...) + glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + return nsExecutor.executor.CommandContext(ctx, nsenterPath, fullArgs...) +} + +// LookPath returns a LookPath wrapped with nsenter +func (nsExecutor *Executor) LookPath(file string) (string, error) { + return "", fmt.Errorf("not implemented, error looking up : %s", file) +} diff --git a/vendor/k8s.io/utils/nsenter/exec_unsupported.go b/vendor/k8s.io/utils/nsenter/exec_unsupported.go new file mode 100644 index 0000000000000..eecbdfc2921a0 --- /dev/null +++ b/vendor/k8s.io/utils/nsenter/exec_unsupported.go @@ -0,0 +1,58 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nsenter + +import ( + "context" + "fmt" + + "k8s.io/utils/exec" +) + +// Executor wraps executor interface to be executed via nsenter +type Executor struct { + // Exec implementation + executor exec.Interface + // Path to the host's root proc path + hostProcMountNsPath string +} + +// NewNsenterExecutor returns new nsenter based executor +func NewNsenterExecutor(hostRootFsPath string, executor exec.Interface) *Executor { + nsExecutor := &Executor{ + hostProcMountNsPath: hostRootFsPath, + executor: executor, + } + return nsExecutor +} + +// Command returns a command wrapped with nenter +func (nsExecutor *Executor) Command(cmd string, args ...string) exec.Cmd { + return nil +} + +// CommandContext returns a CommandContext wrapped with nsenter +func (nsExecutor *Executor) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd { + return nil +} + +// LookPath returns a LookPath wrapped with nsenter +func (nsExecutor *Executor) LookPath(file string) (string, error) { + return "", fmt.Errorf("not implemented, error looking up : %s", file) +} diff --git a/vendor/k8s.io/utils/nsenter/nsenter.go b/vendor/k8s.io/utils/nsenter/nsenter.go new file mode 100644 index 0000000000000..e928a57ac9fe0 --- /dev/null +++ b/vendor/k8s.io/utils/nsenter/nsenter.go @@ -0,0 +1,236 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nsenter + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "k8s.io/utils/exec" + + "github.com/golang/glog" +) + +const ( + // DefaultHostRootFsPath is path to host's filesystem mounted into container + // with kubelet. + DefaultHostRootFsPath = "/rootfs" + // mountNsPath is the default mount namespace of the host + mountNsPath = "/proc/1/ns/mnt" + // nsenterPath is the default nsenter command + nsenterPath = "nsenter" +) + +// Nsenter is part of experimental support for running the kubelet +// in a container. +// +// Nsenter requires: +// +// 1. Docker >= 1.6 due to the dependency on the slave propagation mode +// of the bind-mount of the kubelet root directory in the container. +// Docker 1.5 used a private propagation mode for bind-mounts, so mounts +// performed in the host's mount namespace do not propagate out to the +// bind-mount in this docker version. +// 2. The host's root filesystem must be available at /rootfs +// 3. The nsenter binary must be on the Kubelet process' PATH in the container's +// filesystem. +// 4. The Kubelet process must have CAP_SYS_ADMIN (required by nsenter); at +// the present, this effectively means that the kubelet is running in a +// privileged container. +// 5. The volume path used by the Kubelet must be the same inside and outside +// the container and be writable by the container (to initialize volume) +// contents. TODO: remove this requirement. +// 6. The host image must have "mount", "findmnt", "umount", "stat", "touch", +// "mkdir", "ls", "sh" and "chmod" binaries in /bin, /usr/sbin, or /usr/bin +// 7. 
The host image should have systemd-run in /bin, /usr/sbin, or /usr/bin if +// systemd is installed/enabled in the operating system. +// For more information about mount propagation modes, see: +// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt +type Nsenter struct { + // a map of commands to their paths on the host filesystem + paths map[string]string + + // Path to the host filesystem, typically "/rootfs". Used only for testing. + hostRootFsPath string + + // Exec implementation, used only for testing + executor exec.Interface +} + +// NewNsenter constructs a new instance of Nsenter +func NewNsenter(hostRootFsPath string, executor exec.Interface) (*Nsenter, error) { + ne := &Nsenter{ + hostRootFsPath: hostRootFsPath, + executor: executor, + } + if err := ne.initPaths(); err != nil { + return nil, err + } + return ne, nil +} + +func (ne *Nsenter) initPaths() error { + ne.paths = map[string]string{} + binaries := []string{ + "mount", + "findmnt", + "umount", + "systemd-run", + "stat", + "touch", + "mkdir", + "sh", + "chmod", + "realpath", + } + // search for the required commands in other locations besides /usr/bin + for _, binary := range binaries { + // check for binary under the following directories + for _, path := range []string{"/", "/bin", "/usr/sbin", "/usr/bin"} { + binPath := filepath.Join(path, binary) + if _, err := os.Stat(filepath.Join(ne.hostRootFsPath, binPath)); err != nil { + continue + } + ne.paths[binary] = binPath + break + } + // systemd-run is optional, bailout if we don't find any of the other binaries + if ne.paths[binary] == "" && binary != "systemd-run" { + return fmt.Errorf("unable to find %v", binary) + } + } + return nil +} + +// Exec executes nsenter commands in hostProcMountNsPath mount namespace +func (ne *Nsenter) Exec(cmd string, args []string) exec.Cmd { + hostProcMountNsPath := filepath.Join(ne.hostRootFsPath, mountNsPath) + fullArgs := append([]string{fmt.Sprintf("--mount=%s", hostProcMountNsPath), "--"}, + append([]string{ne.AbsHostPath(cmd)}, args...)...) + glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + return ne.executor.Command(nsenterPath, fullArgs...) +} + +// AbsHostPath returns the absolute runnable path for a specified command +func (ne *Nsenter) AbsHostPath(command string) string { + path, ok := ne.paths[command] + if !ok { + return command + } + return path +} + +// SupportsSystemd checks whether command systemd-run exists +func (ne *Nsenter) SupportsSystemd() (string, bool) { + systemdRunPath, ok := ne.paths["systemd-run"] + return systemdRunPath, ok && systemdRunPath != "" +} + +// EvalSymlinks returns the path name on the host after evaluating symlinks on the +// host. +// mustExist makes EvalSymlinks to return error when the path does not +// exist. When it's false, it evaluates symlinks of the existing part and +// blindly adds the non-existing part: +// pathname: /mnt/volume/non/existing/directory +// /mnt/volume exists +// non/existing/directory does not exist +// -> It resolves symlinks in /mnt/volume to say /mnt/foo and returns +// /mnt/foo/non/existing/directory. +// +// BEWARE! EvalSymlinks is not able to detect symlink looks with mustExist=false! +// If /tmp/link is symlink to /tmp/link, EvalSymlinks(/tmp/link/foo) returns /tmp/link/foo. 
+func (ne *Nsenter) EvalSymlinks(pathname string, mustExist bool) (string, error) { + var args []string + if mustExist { + // "realpath -e: all components of the path must exist" + args = []string{"-e", pathname} + } else { + // "realpath -m: no path components need exist or be a directory" + args = []string{"-m", pathname} + } + outBytes, err := ne.Exec("realpath", args).CombinedOutput() + if err != nil { + glog.Infof("failed to resolve symbolic links on %s: %v", pathname, err) + return "", err + } + return strings.TrimSpace(string(outBytes)), nil +} + +// KubeletPath returns the path name that can be accessed by containerized +// kubelet. It is recommended to resolve symlinks on the host by EvalSymlinks +// before calling this function +func (ne *Nsenter) KubeletPath(pathname string) string { + return filepath.Join(ne.hostRootFsPath, pathname) +} + +// NewFakeNsenter returns a Nsenter that does not run "nsenter --mount=... --", +// but runs everything in the same mount namespace as the unit test binary. +// rootfsPath is supposed to be a symlink, e.g. /tmp/xyz/rootfs -> /. +// This fake Nsenter is enough for most operations, e.g. to resolve symlinks, +// but it's not enough to call /bin/mount - unit tests don't run as root. +func NewFakeNsenter(rootfsPath string) (*Nsenter, error) { + executor := &fakeExec{ + rootfsPath: rootfsPath, + } + // prepare /rootfs/bin, usr/bin and usr/sbin + bin := filepath.Join(rootfsPath, "bin") + if err := os.Symlink("/bin", bin); err != nil { + return nil, err + } + + usr := filepath.Join(rootfsPath, "usr") + if err := os.Mkdir(usr, 0755); err != nil { + return nil, err + } + usrbin := filepath.Join(usr, "bin") + if err := os.Symlink("/usr/bin", usrbin); err != nil { + return nil, err + } + usrsbin := filepath.Join(usr, "sbin") + if err := os.Symlink("/usr/sbin", usrsbin); err != nil { + return nil, err + } + + return NewNsenter(rootfsPath, executor) +} + +type fakeExec struct { + rootfsPath string +} + +func (f fakeExec) Command(cmd string, args ...string) exec.Cmd { + // This will intentionaly panic if Nsenter does not provide enough arguments. + realCmd := args[2] + realArgs := args[3:] + return exec.New().Command(realCmd, realArgs...) +} + +func (fakeExec) LookPath(file string) (string, error) { + return "", errors.New("not implemented") +} + +func (fakeExec) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd { + return nil +} + +var _ exec.Interface = fakeExec{} diff --git a/vendor/k8s.io/utils/nsenter/nsenter_unsupported.go b/vendor/k8s.io/utils/nsenter/nsenter_unsupported.go new file mode 100644 index 0000000000000..0618b9da46954 --- /dev/null +++ b/vendor/k8s.io/utils/nsenter/nsenter_unsupported.go @@ -0,0 +1,56 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nsenter + +import ( + "k8s.io/utils/exec" +) + +const ( + // DefaultHostRootFsPath is path to host's filesystem mounted into container + // with kubelet. 
+ DefaultHostRootFsPath = "/rootfs" +) + +// Nsenter is part of experimental support for running the kubelet +// in a container. +type Nsenter struct { + // a map of commands to their paths on the host filesystem + Paths map[string]string +} + +// NewNsenter constructs a new instance of Nsenter +func NewNsenter(hostRootFsPath string, executor exec.Interface) (*Nsenter, error) { + return &Nsenter{}, nil +} + +// Exec executes nsenter commands in hostProcMountNsPath mount namespace +func (ne *Nsenter) Exec(cmd string, args []string) exec.Cmd { + return nil +} + +// AbsHostPath returns the absolute runnable path for a specified command +func (ne *Nsenter) AbsHostPath(command string) string { + return "" +} + +// SupportsSystemd checks whether command systemd-run exists +func (ne *Nsenter) SupportsSystemd() (string, bool) { + return "", false +} diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore new file mode 100644 index 0000000000000..e256a31e00a52 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml new file mode 100644 index 0000000000000..03ddc7318ae6a --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/.travis.yml @@ -0,0 +1,14 @@ +language: go +dist: xenial +go: + - 1.9.x + - 1.10.x + - 1.11.x +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) + - go tool vet . + - go test -v -race ./... +install: + - go get golang.org/x/lint/golint diff --git a/vendor/sigs.k8s.io/yaml/BUILD.bazel b/vendor/sigs.k8s.io/yaml/BUILD.bazel new file mode 100644 index 0000000000000..aba826b249de7 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "fields.go", + "yaml.go", + "yaml_go110.go", + ], + importmap = "k8s.io/kops/vendor/sigs.k8s.io/yaml", + importpath = "sigs.k8s.io/yaml", + visibility = ["//visibility:public"], + deps = ["//vendor/gopkg.in/yaml.v2:go_default_library"], +) diff --git a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md new file mode 100644 index 0000000000000..de4711513724d --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + + diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE new file mode 100644 index 0000000000000..7805d36de7305 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS new file mode 100644 index 0000000000000..11ad7ce1a40b5 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -0,0 +1,25 @@ +approvers: +- dims +- lavalamp +- smarterclayton +- deads2k +- sttts +- liggitt +- caesarxuchao +reviewers: +- dims +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- gmarek +- sttts +- ncdc +- tallclair +labels: +- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md new file mode 100644 index 0000000000000..0200f75b4d126 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. 
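As a minimal sketch of Caveat #2 (assuming the same `github.com/ghodss/yaml` import path that the examples below use), a YAML mapping keyed by another mapping has no JSON equivalent, so `YAMLToJSON` is expected to return an error rather than coerce the key:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// "? {a: 1}" declares a complex YAML key: the key of this mapping is
	// itself a mapping, which cannot be represented as a JSON object key.
	y := []byte("? {a: 1}\n: some value\n")

	if _, err := yaml.YAMLToJSON(y); err != nil {
		// Expected: an "unsupported map key" style error, per Caveat #2 above.
		fmt.Printf("err: %v\n", err)
	}
}
```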
+ +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. + var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/sigs.k8s.io/yaml/RELEASE.md b/vendor/sigs.k8s.io/yaml/RELEASE.md new file mode 100644 index 0000000000000..6b642464e5839 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/RELEASE.md @@ -0,0 +1,9 @@ +# Release Process + +The `yaml` Project is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release +1. All [OWNERS](OWNERS) must LGTM this release +1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +1. The release issue is closed +1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS new file mode 100644 index 0000000000000..0648a8ebff7bf --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS @@ -0,0 +1,17 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cjcullen +jessfraz +liggitt +philips +tallclair diff --git a/vendor/sigs.k8s.io/yaml/code-of-conduct.md b/vendor/sigs.k8s.io/yaml/code-of-conduct.md new file mode 100644 index 0000000000000..0d15c00cf3252 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go new file mode 100644 index 0000000000000..235b7f2cf6129 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/fields.go @@ -0,0 +1,502 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. 
+type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. 
+ fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. 
It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go new file mode 100644 index 0000000000000..024596112ac51 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/yaml.go @@ -0,0 +1,319 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshal marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// JSONOpt is a decoding option for decoding from JSON format. +type JSONOpt func(*json.Decoder) *json.Decoder + +// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, +// optionally configuring the behavior of the JSON unmarshal. +func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { + return yamlUnmarshal(y, o, false, opts...) +} + +// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal +// into an object, optionally configuring the behavior of the JSON unmarshal. +func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { + return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...) +} + +// yamlUnmarshal unmarshals the given YAML byte stream into the given interface, +// optionally performing the unmarshalling strictly +func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error { + vo := reflect.ValueOf(o) + unmarshalFn := yaml.Unmarshal + if strict { + unmarshalFn = yaml.UnmarshalStrict + } + j, err := yamlToJSON(y, &vo, unmarshalFn) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the +// object, optionally applying decoder options prior to decoding. We are not +// using json.Unmarshal directly as we want the chance to pass in non-default +// options. +func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(r) + for _, opt := range opts { + d = opt(d) + } + if err := d.Decode(&o); err != nil { + return fmt.Errorf("while decoding JSON: %v", err) + } + return nil +} + +// JSONToYAML Converts JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// YAMLToJSON converts YAML to JSON. 
Since JSON is a subset of YAML, +// passing JSON through this method should be a no-op. +// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +// +// For strict decoding of YAML, use YAMLToJSONStrict. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.Unmarshal) +} + +// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, +// returning an error on any duplicate field names. +func YAMLToJSONStrict(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.UnmarshalStrict) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yamlUnmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). So, convert the YAML-compatible object + // to a JSON-compatible object, failing with an error if irrecoverable + // incompatibilties happen along the way. + jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) + if err != nil { + return nil, err + } + + // Convert this object to JSON and return the data. + return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. + if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. + if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. + // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. 
+ keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. + s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. 
+ var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } +} diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go new file mode 100644 index 0000000000000..ab3e06a222a6d --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go @@ -0,0 +1,14 @@ +// This file contains changes that are only compatible with go 1.10 and onwards. + +// +build go1.10 + +package yaml + +import "encoding/json" + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. +func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +}
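
A short sketch of the strict-versus-lenient behavior wired up by the vendored `yaml.go` and `yaml_go110.go` above, again assuming the import path `sigs.k8s.io/yaml`: `UnmarshalStrict` appends `DisallowUnknownFields` to the decoder options, so a field that plain `Unmarshal` would silently drop becomes an error.

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml" // assumed import path for this vendored copy
)

// Config is a hypothetical target struct; field names come from the json tags.
type Config struct {
	Name string `json:"name"`
	Port int    `json:"port"`
}

func main() {
	data := []byte("name: web\nport: 8080\nextra: oops\n")

	// Lenient path: the unknown "extra" key is ignored by the JSON decoder.
	var lenient Config
	if err := yaml.Unmarshal(data, &lenient); err != nil {
		fmt.Println("Unmarshal err:", err)
		return
	}
	fmt.Printf("lenient: %+v\n", lenient)

	// Strict path: DisallowUnknownFields (go1.10+) makes the unknown key an error,
	// and yaml.UnmarshalStrict also rejects duplicate YAML keys.
	var strict Config
	if err := yaml.UnmarshalStrict(data, &strict); err != nil {
		fmt.Println("strict err:", err)
	}
}
```
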